Commit 28205f33 authored by Anthony Larcher's avatar Anthony Larcher
Browse files

merge and sidesampler

parent c11e4144
......@@ -211,6 +211,7 @@ class MelSpecFrontEnd(torch.nn.Module):
"""
:param x:
:param is_eval:
:return:
"""
with torch.no_grad():
......
......@@ -154,7 +154,7 @@ class SideSampler(torch.utils.data.Sampler):
return iter(self.index_iterator)
def __len__(self) -> int:
    """Return the number of samples this sampler yields on one process.

    Total = samples_per_speaker * spk_count * examples_per_speaker,
    replicated `num_replicas` times, then integer-divided across the
    `num_process` worker processes.

    NOTE(review): the original block contained two consecutive return
    statements (a stripped-diff artifact: old line followed by new line),
    making the second one unreachable. Only the newer formula — the one
    the commit introduced — is kept here.
    """
    total = (self.samples_per_speaker
             * self.spk_count
             * self.examples_per_speaker
             * self.num_replicas)
    return total // self.num_process
def set_epoch(self, epoch: int) -> None:
    """Store the given epoch number on the sampler instance."""
    # Equivalent to `self.epoch = epoch`; written via setattr.
    setattr(self, "epoch", epoch)
......
......@@ -863,9 +863,9 @@ def update_training_dictionary(dataset_description,
dataset_opts["batch_size"] = 64
dataset_opts["train"] = dict()
dataset_opts["train"]["duration"] = 2.
dataset_opts["train"]["duration"] = 4.
dataset_opts["train"]["chunk_per_segment"] = -1
dataset_opts["train"]["overlap"] = 1.9
dataset_opts["train"]["overlap"] = 3.9
dataset_opts["train"]["sampler"] = dict()
dataset_opts["train"]["sampler"]["examples_per_speaker"] = 1
dataset_opts["train"]["sampler"]["samples_per_speaker"] = 100
......@@ -944,7 +944,6 @@ def update_training_dictionary(dataset_description,
training_opts["best_model_name"] = "best_model.pt"
training_opts["checkpoint_frequency"] = "10"
# Use options from the YAML config files
fill_dict(dataset_opts, tmp_data_dict)
fill_dict(model_opts, tmp_model_dict)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment