Anthony Larcher / sidekit · Commits

Commit 33750df6
Authored Apr 08, 2021 by Anthony Larcher
Parent: 96f24f3e

monitor display

Changes: 1 file

nnet/xvector.py

@@ -1330,9 +1330,9 @@ def new_xtrain(dataset_description,
                               compute_test_eer=training_opts["compute_test_eer"])

     # Display the entire configurations as YAML dictionaries
-    logging.info(yaml.dump(dataset_opts, default_flow_style=False))
-    logging.info(yaml.dump(model_opts, default_flow_style=False))
-    logging.info(yaml.dump(training_opts, default_flow_style=False))
+    monitor.logger.info(yaml.dump(dataset_opts, default_flow_style=False))
+    monitor.logger.info(yaml.dump(model_opts, default_flow_style=False))
+    monitor.logger.info(yaml.dump(training_opts, default_flow_style=False))

     # Set all the seeds
     numpy.random.seed(training_opts["seed"]) # Set the random seed of numpy for the data split.
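
The hunk above reroutes the YAML dumps of the three option dictionaries from module-level logging calls to a logger owned by the training monitor. The monitor object itself is not defined in this diff; the snippet below is a minimal, hypothetical sketch (the class name, constructor argument, and example option dictionaries are assumptions, not sidekit's actual API) of the pattern: an object that carries its own logging.Logger and receives the yaml.dump output.

import logging
import yaml

class MinimalMonitor:
    # Hypothetical stand-in for the training monitor referenced in the diff.
    def __init__(self, output_file):
        # The monitor owns its own logger instead of using the root logger directly.
        self.logger = logging.getLogger("monitor")
        self.logger.setLevel(logging.INFO)
        self.logger.addHandler(logging.FileHandler(output_file))
        self.logger.addHandler(logging.StreamHandler())

# Made-up example dictionaries, standing in for dataset_opts / training_opts.
dataset_opts = {"batch_size": 64, "sample_rate": 16000}
training_opts = {"seed": 42, "num_cpu": 4}

monitor = MinimalMonitor("training.log")
# Same pattern as the added lines: dump each configuration dictionary as YAML.
monitor.logger.info(yaml.dump(dataset_opts, default_flow_style=False))
monitor.logger.info(yaml.dump(training_opts, default_flow_style=False))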

@@ -1352,18 +1352,18 @@ def new_xtrain(dataset_description,
     model.to(device)

-    logging.info(f"Start process at {time.strftime('%H:%M:%S', time.localtime())}")
-    logging.info(f"Use \t {torch.cuda.device_count()} \t gpus")
-    logging.info(f"Use \t {training_opts['num_cpu']} \t cpus")
+    monitor.logger.info(f"Start process at {time.strftime('%H:%M:%S', time.localtime())}")
+    monitor.logger.info(f"Use \t {torch.cuda.device_count()} \t gpus")
+    monitor.logger.info(f"Use \t {training_opts['num_cpu']} \t cpus")

     # Initialise data loaders
     training_loader, validation_loader,\
     validation_tar_indices, validation_non_indices = get_loaders(dataset_opts,
                                                                  training_opts,
                                                                  speaker_number)

-    logging.info(f"Validation EER will be measured using")
-    logging.info(f"\t {numpy.sum(validation_tar_indices)} target trials and")
-    logging.info(f"\t {numpy.sum(validation_non_indices)} non-target trials")
+    monitor.logger.info(f"Validation EER will be measured using")
+    monitor.logger.info(f"\t {numpy.sum(validation_tar_indices)} target trials and")
+    monitor.logger.info(f"\t {numpy.sum(validation_non_indices)} non-target trials")

     # Create optimizer and scheduler
     optimizer, scheduler = get_optimizer(model, model_opts, training_opts)
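
In the hunk above, the second group of rerouted calls reports how many target and non-target validation trials will be used for the EER estimate by summing two index arrays. Assuming validation_tar_indices and validation_non_indices are boolean masks over the trial list (the diff itself does not show how get_loaders builds them), the counting works as in this small, self-contained illustration with made-up labels:

import numpy

# Hypothetical trial lists: a trial is a target when enrolment and test speakers match.
speaker_enroll = numpy.array([0, 0, 1, 1, 2])
speaker_test = numpy.array([0, 1, 1, 2, 2])

validation_tar_indices = speaker_enroll == speaker_test   # boolean mask of target trials
validation_non_indices = ~validation_tar_indices          # boolean mask of non-target trials

# Summing a boolean mask counts its True entries, hence the trial counts in the log messages.
print(f"\t {numpy.sum(validation_tar_indices)} target trials and")    # 3 target trials
print(f"\t {numpy.sum(validation_non_indices)} non-target trials")    # 2 non-target trials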

@@ -1416,7 +1416,7 @@ def new_xtrain(dataset_description,
             save_model(model, monitor, model_opts, training_opts, optimizer, scheduler)

     for ii in range(torch.cuda.device_count()):
-        logging.debug(torch.cuda.memory_summary(ii))
+        monitor.logger.info(torch.cuda.memory_summary(ii))

     # TODO handle the display using the training_monitor
     monitor.display_final()
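
The final hunk sends a per-GPU memory report to the monitor's logger after training, instead of a debug-level call, before the monitor renders its final display. Below is a minimal sketch of the same reporting loop using only the standard logging module in place of the monitor object (which this diff does not define); the loop simply does nothing on a CPU-only machine.

import logging
import torch

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("monitor")

# torch.cuda.memory_summary(device) returns a human-readable allocator report for that device.
for ii in range(torch.cuda.device_count()):   # zero iterations when no GPU is visible
    logger.info(torch.cuda.memory_summary(ii))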