Skip to content
GitLab
Menu
Projects
Groups
Snippets
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
Anthony Larcher
sidekit
Commits
c4507e81
Commit
c4507e81
authored
Jan 17, 2022
by
Le Lan Gaël
Browse files
bugfixes
- disabling mp3 and vorbis codecs due to errors
parent
d163cb62
Changes
2
Hide whitespace changes
Inline
Side-by-side
nnet/augmentation.py
View file @
c4507e81
...
...
@@ -265,8 +265,8 @@ def data_augmentation(speech,
         configs = [
             ({"format": "wav", "encoding": 'ULAW', "bits_per_sample": 8}, "8 bit mu-law"),
             ({"format": "wav", "encoding": 'ALAW', "bits_per_sample": 8}, "8 bit a-law"),
-            ({"format": "mp3", "compression": -9}, "MP3"),
-            ({"format": "vorbis", "compression": -1}, "Vorbis")
+            # ({"format": "mp3", "compression": -9}, "MP3"),
+            # ({"format": "vorbis", "compression": -1}, "Vorbis")
         ]
         param, title = random.choice(configs)
         speech = torchaudio.functional.apply_codec(speech, sample_rate, **param)
...
...
nnet/xvector.py
View file @
c4507e81
...
...
@@ -634,10 +634,10 @@ class Xtractor(torch.nn.Module):
             elif self.loss == 'aps':
                 self.after_speaker_embedding = SoftmaxAngularProto(int(self.speaker_number))
-            self.preprocessor_weight_decay = 0.00002
-            self.sequence_network_weight_decay = 0.00002
-            self.stat_pooling_weight_decay = 0.00002
-            self.before_speaker_embedding_weight_decay = 0.00002
+            self.preprocessor_weight_decay = 0.0002
+            self.sequence_network_weight_decay = 0.0002
+            self.stat_pooling_weight_decay = 0.0002
+            self.before_speaker_embedding_weight_decay = 0.0002
             self.after_speaker_embedding_weight_decay = 0.0002

         elif model_archi == "rawnet2":
...
...
@@ -1067,6 +1067,8 @@ def update_training_dictionary(dataset_description,
    (two of the following scheduler-default lines are newly added in this commit; the diff markers were lost in extraction)
     training_opts["scheduler"] = dict()
     training_opts["scheduler"]["type"] = "ReduceLROnPlateau"
     training_opts["scheduler"]["step_size_up"] = 10
     training_opts["scheduler"]["epochs"] = 1
     training_opts["scheduler"]["steps_per_epoch"] = 5994
     training_opts["scheduler"]["base_lr"] = 1e-8
     training_opts["scheduler"]["mode"] = "triangular2"
...
...
@@ -1619,7 +1621,7 @@ def xtrain(dataset_description,
                           scheduler,
                           device,
                           scaler=scaler,
-                          training_opts["mixed_precision"])
+                          mixed_precision=training_opts["mixed_precision"])
                           #aam_scheduler=aam_scheduler)
# Cross validation
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment