Anthony Larcher / sidekit / Commits

Commit a23b8549
authored Apr 08, 2021 by Anthony Larcher
Parent ffd57577

    monitor display

Changes: 1 file, nnet/xvector.py
@@ -398,7 +398,7 @@ class TrainingMonitor():
         self.val_acc.append(val_acc)

         # remember best accuracy and save checkpoint
-        if self.compute_test_eer and test_eer
+        if self.compute_test_eer and test_eer:
             self.test_eer.append(test_eer)
             self.is_best = test_eer < self.best_eer
             self.best_eer = min(test_eer, self.best_eer)
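For context, this hunk is the monitor's running best-EER bookkeeping: each reported test EER is appended to a history, flagged as a new best if it beats the lowest value seen so far, and folded into the minimum. A minimal, self-contained sketch of the same pattern (everything apart from the test_eer / best_eer / is_best names is illustrative, not taken from nnet/xvector.py):

```python
# Minimal sketch of the best-EER bookkeeping pattern; only the attribute
# names test_eer, best_eer and is_best come from the diff above.
class EERTracker:
    def __init__(self, best_eer=100):
        self.best_eer = best_eer    # EER is a percentage, so 100 is a safe ceiling
        self.test_eer = []
        self.is_best = False

    def update(self, test_eer):
        self.test_eer.append(test_eer)
        self.is_best = test_eer < self.best_eer       # lower EER is better
        self.best_eer = min(test_eer, self.best_eer)

tracker = EERTracker()
for eer in (8.2, 6.9, 7.4):
    tracker.update(eer)
print(tracker.best_eer, tracker.is_best)   # -> 6.9 False (last epoch was not a new best)
```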
@@ -1354,14 +1354,6 @@ def new_xtrain(dataset_description,
                        training_opts,
                        speaker_number)

-    # Select a subset of non-target trials to reduce the number of tests
-    #import ipdb
-    #ipdb.set_trace()
-    #tar_non_ratio = numpy.sum(validation_tar_indices)/numpy.sum(validation_non_indices)
-    #validation_non_indices *= numpy.random.choice([False, True],
-    #                                               size=validation_non_indices.shape,
-    #                                               p=[1-tar_non_ratio, tar_non_ratio])
     logging.info(f"Validation EER will be measured using")
     logging.info(f"\t{numpy.sum(validation_tar_indices)} target trials and")
     logging.info(f"\t{numpy.sum(validation_non_indices)} non-target trials")
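The lines removed here were a commented-out experiment: subsample the non-target validation trials so that their expected count matches the number of target trials, which shrinks the EER computation. A standalone sketch of that masking idea, assuming boolean trial masks (the shapes and probabilities below are made up for the demonstration; only tar_non_ratio and the two index arrays are named in the removed comments):

```python
import numpy

# Illustrative boolean masks over a set of validation trials.
rng = numpy.random.default_rng(0)
validation_tar_indices = rng.random(10000) < 0.05    # ~5% target trials
validation_non_indices = ~validation_tar_indices     # the rest are non-target

# Keep each non-target trial with probability #target / #non-target, so the
# expected number of surviving non-target trials matches the target count.
tar_non_ratio = numpy.sum(validation_tar_indices) / numpy.sum(validation_non_indices)
validation_non_indices &= rng.random(validation_non_indices.shape) < tar_non_ratio

print(numpy.sum(validation_tar_indices), numpy.sum(validation_non_indices))
```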
@@ -1377,18 +1369,20 @@ def new_xtrain(dataset_description,
     monitor = TrainingMonitor(output_file="log/training_xv.log",
                               patience=training_opts["patience"],
                               best_accuracy=0.0,
+                              best_accuracy_epoch=1,
+                              best_eer_epoch=1,
                               best_eer=100,
                               compute_test_eer=training_opts["compute_test_eer"])

     for epoch in range(1, training_opts["epochs"] + 1):
         monitor.update(epoch)

         # Process one epoch and return the current model
         if monitor.current_patience == 0:
             print(f"Stopping at epoch {epoch} for cause of patience")
             break

-        model = train_epoch(model,
+        model = new_train_epoch(model,
                             epoch,
                             training_loader,
                             optimizer,
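The loop above leans on the monitor's patience counter for early stopping: training breaks once monitor.current_patience reaches 0. How the counter evolves is not shown in this diff; a plausible sketch, assuming it resets on improvement and decrements otherwise (only the current_patience attribute and the break come from the code above):

```python
# Sketch of the patience contract the training loop relies on; the
# reset/decrement logic is an assumption, not shown in the diff.
class PatienceMonitor:
    def __init__(self, patience):
        self.patience = patience
        self.current_patience = patience
        self.best_eer = 100

    def update(self, epoch, test_eer=None):
        if test_eer is None:
            return
        if test_eer < self.best_eer:
            self.best_eer = test_eer
            self.current_patience = self.patience    # improvement: reset the counter
        else:
            self.current_patience -= 1               # stagnation: spend one unit

monitor = PatienceMonitor(patience=2)
for epoch, eer in enumerate([9.0, 8.0, 8.5, 8.7, 8.9], start=1):
    if monitor.current_patience == 0:
        print(f"Stopping at epoch {epoch} for cause of patience")
        break
    monitor.update(epoch, test_eer=eer)
```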
@@ -1412,13 +1406,15 @@ def new_xtrain(dataset_description,
         if training_opts["compute_test_eer"]:
             test_eer = new_test_metrics(model, device, training_opts["mixed_precision"])

+        # The update of the TrainingMonitor includes the display
         monitor.update(epoch,
                        test_eer=test_eer,
                        val_eer=val_eer,
                        val_loss=val_loss,
                        val_acc=val_acc)
         monitor.display()

         # Save the current model and if needed update the best one
         # TODO: add an option that keeps the models at certain epochs (for example before the LR change)
         save_model(model, monitor, model_opts, training_opts, optimizer, scheduler)
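The new comment says the TrainingMonitor update includes the display; the diff shows update() receiving the epoch's metrics before display() is invoked. A hypothetical sketch of how such a pair could fit together (the method bodies are guesses; only the names update/display and the keyword arguments appear in the diff):

```python
import logging

# Hypothetical pairing of update() and display(): update() records the
# epoch's metrics so display() has the latest row at hand.
class MonitorSketch:
    def __init__(self):
        self.history = []

    def update(self, epoch, test_eer=None, val_eer=None, val_loss=None, val_acc=None):
        self.history.append((epoch, test_eer, val_eer, val_loss, val_acc))

    def display(self):
        epoch, test_eer, val_eer, val_loss, val_acc = self.history[-1]
        logging.info("epoch %s  val_loss=%s  val_acc=%s  val_eer=%s  test_eer=%s",
                     epoch, val_loss, val_acc, val_eer, test_eer)
```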