Commit c6d986f7 authored by Martin Lebourdais

bug fix: track best F-measure instead of best accuracy in seqTrain's checkpointing and early stopping

parent dceace67
@@ -384,8 +384,8 @@ def seqTrain(dataset_yaml,
     scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', verbose=True)
-    best_accuracy = 0.0
-    best_accuracy_epoch = 1
+    best_fmes = 0.0
+    best_fmes_epoch = 1
     curr_patience = patience
     for epoch in range(1, epochs + 1):
         # Process one epoch and return the current model
@@ -408,8 +408,8 @@ def seqTrain(dataset_yaml,
         print(f"Learning rate is {optimizer.param_groups[0]['lr']}")
         ## remember best accuracy and save checkpoint
-        is_best = accuracy > best_accuracy
-        best_accuracy = max(accuracy, best_accuracy)
+        is_best = fmes > best_fmes
+        best_fmes = max(fmes, best_fmes)
         if type(model) is SeqToSeq:
             save_checkpoint({
@@ -429,12 +429,12 @@ def seqTrain(dataset_yaml,
             }, is_best, filename=tmp_model_name + ".pt", best_filename=best_model_name + '.pt')
         if is_best:
-            best_accuracy_epoch = epoch
+            best_fmes_epoch = epoch
             curr_patience = patience
         else:
             curr_patience -= 1
-    logging.critical(f"Best accuracy {best_accuracy * 100.} obtained at epoch {best_accuracy_epoch}")
+    logging.critical(f"Best F-measure {best_fmes * 100.} obtained at epoch {best_fmes_epoch}")
 
 def calc_recall(output, target, device):
     y_trueb = target.to(device)
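
Taken together, the three hunks change the model-selection criterion from accuracy to F-measure. For readers without the full file, a minimal sketch of the resulting patience-based early-stopping pattern; train_with_patience, evaluate_fmeasure, and save_checkpoint are hypothetical stand-ins here, not the repository's actual helpers:

def train_with_patience(model, epochs, patience, evaluate_fmeasure, save_checkpoint):
    best_fmes = 0.0
    best_fmes_epoch = 1
    curr_patience = patience
    for epoch in range(1, epochs + 1):
        fmes = evaluate_fmeasure(model)      # validation F-measure for this epoch
        is_best = fmes > best_fmes           # compare F-measure, not accuracy
        best_fmes = max(fmes, best_fmes)
        save_checkpoint(model, is_best)      # always checkpoint; flag the best one
        if is_best:
            best_fmes_epoch = epoch
            curr_patience = patience         # reset patience on improvement
        else:
            curr_patience -= 1               # count epochs without improvement
        if curr_patience == 0:               # no improvement for `patience` epochs
            break
    return best_fmes, best_fmes_epoch

The point of the fix is that is_best, the checkpoint flag, the patience reset, and the final log line now all key off the same metric (fmes), so the saved "best" model matches the reported best F-measure.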
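
The diff's trailing context shows a calc_recall helper; assuming fmes is the standard F-measure, it is the harmonic mean of precision and recall. A one-line sketch of that formula (the repository may compute it differently):

import torch

def f_measure(precision: torch.Tensor, recall: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # Harmonic mean of precision and recall; eps guards against division by zero.
    return 2 * precision * recall / (precision + recall + eps)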