        return t, self.label[index]  # label container name assumed (the original identifier was stripped in extraction)

    def __len__(self):
        return len(self.label)  # dataset length; the original argument was stripped, self.label is assumed
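# The imports and module-level globals below are assumed to be defined in the earlier part of
# this post (they are not shown in this excerpt, as are TrainingDataSet, TestDataSet and
# VGGBaseSimpleS2); they are restated here only so the sketch below is self-contained.
import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")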
def cnn_classification():
    batch_size = 256
    trainDataLoader = DataLoader(TrainingDataSet(), batch_size=batch_size, shuffle=False)
    testDataLoader = DataLoader(TestDataSet(), batch_size=batch_size, shuffle=False)
    epoch_num = 200
    # learning rate (the original value was lost in extraction; 1e-4 is an assumed placeholder)
    lr = 1e-4
    net = VGGBaseSimpleS2().to(device)
    print(net)
    # loss (the loss class was stripped; cross-entropy is assumed for this classification task)
    loss_func = nn.CrossEntropyLoss()
    # optimizer (the optimizer class was stripped; Adam is assumed)
    optimizer = optim.Adam(net.parameters(), lr=lr)
    # optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)  # alternative; momentum value assumed
    # learning-rate scheduler (the gamma value was stripped; 0.5 is an assumed placeholder)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)
    # TensorBoard log directory
    if not os.path.exists("logCNN"):
        os.makedirs("logCNN")
    writer = SummaryWriter("logCNN")
    for epoch in range(epoch_num):
        train_sum_loss = 0
        train_sum_correct = 0
        train_sum_fp = 0
        train_sum_fn = 0
        train_sum_tp = 0
        train_sum_tn = 0
        for i, data in enumerate(trainDataLoader):
            net.train()  # the original call was stripped; switching to training mode is assumed here
            inputs, labels = data
            # add a channel dimension for the conv net; the dtype casts are assumed (original calls stripped)
            inputs = inputs.unsqueeze(1).to(torch.float32)
            labels = labels.to(torch.int64)
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = net(inputs)
            loss = loss_func(outputs, labels)
            # standard backprop step (the three calls were stripped in extraction)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # predicted class = argmax over the class dimension
            _, pred = torch.max(outputs, dim=1)
            acc = (pred == labels).cpu().sum()
            one = torch.ones_like(labels)
            zero = torch.zeros_like(labels)
            # confusion-matrix counts for the positive class
            tn = ((labels == zero) * (pred == zero)).sum()
            tp = ((labels == one) * (pred == one)).sum()
            fp = ((labels == zero) * (pred == one)).sum()
            fn = ((labels == one) * (pred == zero)).sum()
            train_sum_fn += fn.item()
            train_sum_fp += fp.item()
            train_sum_tn += tn.item()
            train_sum_tp += tp.item()
            train_sum_loss += loss.item()
            train_sum_correct += acc.item()
        # per-epoch averages and precision/recall from the accumulated counts
        train_loss = train_sum_loss * 1.0 / len(trainDataLoader)
        train_correct = train_sum_correct * 1.0 / len(trainDataLoader) / batch_size
        train_precision = train_sum_tp * 1.0 / (train_sum_fp + train_sum_tp)
        train_recall = train_sum_tp * 1.0 / (train_sum_fn + train_sum_tp)
        writer.add_scalar("train loss", train_loss, global_step=epoch)
        writer.add_scalar("train correct", train_correct, global_step=epoch)
        writer.add_scalar("train precision", train_precision, global_step=epoch)
        writer.add_scalar("train recall", train_recall, global_step=epoch)
        # save a checkpoint after every epoch
        if not os.path.exists("models_aug_CNN"):
            os.makedirs("models_aug_CNN")
        torch.save(net.state_dict(), "models_aug_CNN/{}.pth".format(epoch + 1))
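# Minimal usage sketch: assuming TrainingDataSet, TestDataSet and VGGBaseSimpleS2 are defined
# as in the earlier part of this post, training is started simply by calling the function above.
if __name__ == "__main__":
    cnn_classification()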