Commit 5b5af6aa authored by Anthony Larcher

aam params

parent 6430129d
@@ -166,7 +166,7 @@ class SideSet(Dataset):
self.transformation = dataset["train"]["transformation"]
else:
self.duration = dataset["eval"]["duration"]
self.transformation = dataset["eval"]["transformation"]
self.transformation = dataset["eval"]["transformation"]
self.sample_number = int(self.duration * self.sample_rate)
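The SideSet hunk only touches the eval-split transformation assignment; the surrounding code picks duration and transformation per split and derives the chunk length in samples from the duration. As a quick illustration, assuming 3 s chunks at a 16 kHz sample rate (both values are illustrative, not taken from the diff):

    sample_number = int(3.0 * 16000)   # 48000 samples per chunk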
@@ -408,6 +408,7 @@ class Xtractor(torch.nn.Module):
self.preprocessor = MelSpecFrontEnd(n_mels=80)
self.sequence_network = PreResNet34()
self.embedding_size = 256
self.before_speaker_embedding = torch.nn.Linear(in_features=5120,
out_features=self.embedding_size)
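The added embedding_size attribute fixes the x-vector dimension at 256, and before_speaker_embedding projects the pooled PreResNet34 output (5120 features in this configuration) down to it. A minimal sketch of that projection, with an arbitrary batch size:

    import torch

    # stand-in for the pooled PreResNet34 output of a batch of 8 segments
    pooled = torch.randn(8, 5120)
    before_speaker_embedding = torch.nn.Linear(in_features=5120, out_features=256)
    embedding = before_speaker_embedding(pooled)   # shape (8, 256)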
@@ -444,8 +445,8 @@ class Xtractor(torch.nn.Module):
if self.loss == "aam":
self.after_speaker_embedding = ArcMarginProduct(self.embedding_size,
int(self.speaker_number),
s = 30,
m = 0.2,
s = 20,
m = 0.3,
easy_margin = False)
elif self.loss == 'aps':
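The commit retunes the AAM-softmax head: the scale s drops from 30 to 20 and the angular margin m rises from 0.2 to 0.3, i.e. a harder margin applied with a softer scaling of the logits. ArcMarginProduct is SIDEKIT's own layer; the sketch below is only a generic illustration of the AAM-softmax logit computation with the new values, assuming L2-normalised embeddings and class weights, not the library implementation:

    import torch
    import torch.nn.functional as F

    def aam_logits(embeddings, weight, labels, s=20.0, m=0.3):
        # cosine similarity between normalised embeddings and class weights
        cosine = F.linear(F.normalize(embeddings), F.normalize(weight))
        # add the angular margin m to the target-class angle only
        theta = torch.acos(cosine.clamp(-1.0 + 1e-7, 1.0 - 1e-7))
        target = F.one_hot(labels, num_classes=weight.shape[0]).bool()
        logits = torch.where(target, torch.cos(theta + m), cosine)
        return s * logits   # pass to torch.nn.CrossEntropyLoss together with labels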
@@ -1000,9 +1001,9 @@ def xtrain(speaker_number,
param_list.append({'params': model.module.after_speaker_embedding.parameters(), 'weight_decay': model.module.after_speaker_embedding_weight_decay})
optimizer = _optimizer(param_list, **_options)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
step_size=20 * training_loader.__len__(),
gamma=0.5)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[10000,50000,100000],
gamma=0.5)
if mixed_precision:
scaler = torch.cuda.amp.GradScaler()
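The per-epoch StepLR (halving the learning rate every 20 epochs' worth of batches) is replaced by a MultiStepLR keyed to absolute iteration counts. A small sketch of the resulting decay, assuming scheduler.step() is called once per batch, which the old step_size of 20 * len(training_loader) suggests; the model and base learning rate are placeholders:

    import torch

    model = torch.nn.Linear(10, 2)                    # placeholder model
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[10000, 50000, 100000],
                                                     gamma=0.5)
    # lr: 1e-3 until step 10000, then 5e-4, 2.5e-4 after 50000, 1.25e-4 after 100000
    for batch in range(3):
        optimizer.step()
        scheduler.step()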