Commit a23b8549 authored by Anthony Larcher

monitor display

parent ffd57577
@@ -398,7 +398,7 @@ class TrainingMonitor():
         self.val_acc.append(val_acc)
         # remember best accuracy and save checkpoint
-        if self.compute_test_eer and test_eer
+        if self.compute_test_eer and test_eer:
             self.test_eer.append(test_eer)
             self.is_best = test_eer < self.best_eer
             self.best_eer = min(test_eer, self.best_eer)
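For readers outside the diff: the corrected condition feeds straight into the best-EER bookkeeping shown in the context lines. A minimal, self-contained sketch of that logic, assuming only the attributes visible in the hunk (constructor defaults and the rest of the real class are assumptions):

```python
# Minimal sketch of the EER bookkeeping in the hunk above; attributes not
# shown in the diff (defaults, logging, patience handling) are assumptions.
class TrainingMonitor:
    def __init__(self, best_eer=100, compute_test_eer=True):
        self.best_eer = best_eer
        self.compute_test_eer = compute_test_eer
        self.test_eer = []
        self.is_best = False

    def update(self, epoch, test_eer=None):
        if self.compute_test_eer and test_eer:
            self.test_eer.append(test_eer)
            self.is_best = test_eer < self.best_eer  # lower EER is better
            self.best_eer = min(test_eer, self.best_eer)
```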
@@ -1354,14 +1354,6 @@ def new_xtrain(dataset_description,
                          training_opts,
                          speaker_number)
-    # Select a subset of non-target trials to reduce the number of tests
-    #import ipdb
-    #ipdb.set_trace()
-    #tar_non_ratio = numpy.sum(validation_tar_indices)/numpy.sum(validation_non_indices)
-    #validation_non_indices *= numpy.random.choice([False, True],
-    #                                               size=validation_non_indices.shape,
-    #                                               p=[1-tar_non_ratio, tar_non_ratio])
     logging.info(f"Validation EER will be measured using")
     logging.info(f"\t {numpy.sum(validation_tar_indices)} target trials and")
     logging.info(f"\t {numpy.sum(validation_non_indices)} non-target trials")
@@ -1377,18 +1369,20 @@ def new_xtrain(dataset_description,
     monitor = TrainingMonitor(output_file="log/training_xv.log",
                               patience=training_opts["patience"],
                               best_accuracy=0.0,
-                              best_accuracy_epoch=1,
+                              best_eer_epoch=1,
                               best_eer=100,
                               compute_test_eer=training_opts["compute_test_eer"])
     for epoch in range(1, training_opts["epochs"] + 1):
+        monitor.update(epoch)
         # Process one epoch and return the current model
         if monitor.current_patience == 0:
             print(f"Stopping at epoch {epoch} for cause of patience")
             break
-        model = train_epoch(model,
+        model = new_train_epoch(model,
                             epoch,
                             training_loader,
                             optimizer,
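Two behavioural changes sit in this hunk: `monitor.update(epoch)` now runs at the top of the loop, and training stops once `current_patience` reaches zero. The diff never shows how the counter moves; a plausible rule, flagged as an assumption, would be:

```python
# Assumed patience rule -- the diff only ever reads monitor.current_patience.
# A common convention: reset on a new best EER, otherwise count down.
def tick_patience(monitor):
    if monitor.is_best:
        monitor.current_patience = monitor.patience  # improvement: full reset
    else:
        monitor.current_patience -= 1                # no improvement: one strike
```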
@@ -1412,13 +1406,15 @@ def new_xtrain(dataset_description,
         if training_opts["compute_test_eer"]:
             test_eer = new_test_metrics(model, device, training_opts["mixed_precision"])
+        # The update of the TrainingMonitor includes the display
         monitor.update(epoch,
                        test_eer=test_eer,
                        val_eer=val_eer,
                        val_loss=val_loss,
                        val_acc=val_acc)
-        monitor.display()
         # Save the current model and if needed update the best one
         # TODO add an option that keeps the models at certain epochs (for example before the LR change)
         save_model(model, monitor, model_opts, training_opts, optimizer, scheduler)
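`save_model` receives the monitor, so it can use `monitor.is_best` to decide whether the current checkpoint also becomes the best one. A hedged sketch with that shape (the argument list matches the call above; the body, paths, and checkpoint keys are invented for illustration):

```python
import shutil
import torch

def save_model(model, monitor, model_opts, training_opts, optimizer, scheduler):
    # Sketch only: the real SIDEKIT implementation is not shown in the diff.
    checkpoint = {"model_state_dict": model.state_dict(),
                  "optimizer_state_dict": optimizer.state_dict(),
                  "scheduler_state_dict": scheduler.state_dict(),
                  "best_eer": monitor.best_eer}
    torch.save(checkpoint, "log/checkpoint.pt")                # hypothetical path
    if monitor.is_best:
        shutil.copyfile("log/checkpoint.pt", "log/best_model.pt")  # keep the best
```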