Commit 33750df6 authored by Anthony Larcher

monitor display

parent 96f24f3e
@@ -1330,9 +1330,9 @@ def new_xtrain(dataset_description,
                                compute_test_eer=training_opts["compute_test_eer"])
     # Display the entire configuration as YAML dictionaries
-    logging.info(yaml.dump(dataset_opts, default_flow_style=False))
-    logging.info(yaml.dump(model_opts, default_flow_style=False))
-    logging.info(yaml.dump(training_opts, default_flow_style=False))
+    monitor.logger.info(yaml.dump(dataset_opts, default_flow_style=False))
+    monitor.logger.info(yaml.dump(model_opts, default_flow_style=False))
+    monitor.logger.info(yaml.dump(training_opts, default_flow_style=False))
     # Set all the seeds
     numpy.random.seed(training_opts["seed"])  # Set the random seed of numpy for the data split.
@@ -1352,18 +1352,18 @@ def new_xtrain(dataset_description,
     model.to(device)
-    logging.info(f"Start process at {time.strftime('%H:%M:%S', time.localtime())}")
-    logging.info(f"Use \t{torch.cuda.device_count()} \tgpus")
-    logging.info(f"Use \t{training_opts['num_cpu']} \tcpus")
+    monitor.logger.info(f"Start process at {time.strftime('%H:%M:%S', time.localtime())}")
+    monitor.logger.info(f"Use \t{torch.cuda.device_count()} \tgpus")
+    monitor.logger.info(f"Use \t{training_opts['num_cpu']} \tcpus")
     # Initialise data loaders
     training_loader, validation_loader, validation_tar_indices, validation_non_indices = get_loaders(dataset_opts,
                                                                                                      training_opts,
                                                                                                      speaker_number)
-    logging.info(f"Validation EER will be measured using")
-    logging.info(f"\t {numpy.sum(validation_tar_indices)} target trials and")
-    logging.info(f"\t {numpy.sum(validation_non_indices)} non-target trials")
+    monitor.logger.info(f"Validation EER will be measured using")
+    monitor.logger.info(f"\t {numpy.sum(validation_tar_indices)} target trials and")
+    monitor.logger.info(f"\t {numpy.sum(validation_non_indices)} non-target trials")
     # Create optimizer and scheduler
     optimizer, scheduler = get_optimizer(model, model_opts, training_opts)
@@ -1416,7 +1416,7 @@ def new_xtrain(dataset_description,
             save_model(model, monitor, model_opts, training_opts, optimizer, scheduler)
     for ii in range(torch.cuda.device_count()):
-        logging.debug(torch.cuda.memory_summary(ii))
+        monitor.logger.info(torch.cuda.memory_summary(ii))
     # TODO handle the display through the training_monitor
     monitor.display_final()
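The net effect of this commit is that every status message goes through the training monitor's own logger instead of the root logging module, so the run-time output and the final summary printed by monitor.display_final() end up in one place. The sketch below only illustrates that pattern and is not sidekit's actual TrainingMonitor: the class name, the log_file argument and the best_eer attribute are assumptions made for the example.

import logging


class TrainingMonitor:
    """Minimal stand-in for a training monitor that owns its own logger."""

    def __init__(self, log_file="train.log", name="monitor"):
        # Dedicated logger so that all training messages go to a single handler
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging.INFO)
        if not self.logger.handlers:
            self.logger.addHandler(logging.FileHandler(log_file))
        self.best_eer = None  # hypothetical field, updated elsewhere during training

    def display_final(self):
        # Summarise the run once training is over
        self.logger.info("Best validation EER: %s", self.best_eer)


monitor = TrainingMonitor()
monitor.logger.info("Use \t%d \tgpus", 2)  # same call pattern as in the diff above
monitor.display_final()

With a monitor object of this shape, monitor.logger.info(...) is a drop-in replacement for logging.info(...), which is exactly the substitution made in the three hunks above.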