Commit 244a0de5 authored by Gaël Le Lan's avatar Gaël Le Lan
Browse files

bugfix

parent c324e7e1
......@@ -520,7 +520,7 @@ class Xtractor(torch.nn.Module):
self.sequence_network = PreHalfResNet34()
self.embedding_size = 512
self.before_speaker_embedding = torch.nn.Linear(in_features = 2560,
self.before_speaker_embedding = torch.nn.Linear(in_features = 5120,
out_features = self.embedding_size)
self.stat_pooling = AttentivePooling(256, 80)
......@@ -1110,8 +1110,8 @@ def xtrain(speaker_number,
non_indices = torch.tril(~mask, -1).numpy()
tar_non_ratio = numpy.sum(tar_indices)/numpy.sum(non_indices)
non_indices *= numpy.random.choice([False, True], size=non_indices.shape, p=[1-tar_non_ratio, tar_non_ratio])
tar_indices *= numpy.random.choice([False, True], size=tar_indices.shape, p=[0.9, 0.1])
non_indices *= numpy.random.choice([False, True], size=non_indices.shape, p=[0.9, 0.1])
#tar_indices *= numpy.random.choice([False, True], size=tar_indices.shape, p=[0.9, 0.1])
#non_indices *= numpy.random.choice([False, True], size=non_indices.shape, p=[0.9, 0.1])
logging.critical("val tar count : {:d}, non count : {:d}".format(numpy.sum(tar_indices), numpy.sum(non_indices)))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment