Commit 5f5455ca authored by Anthony Larcher

minor

parent cbb5ffc5
@@ -434,7 +434,8 @@ class SideSet(Dataset):
         # Open
         random_start = numpy.random.randint(int(self.sessions.iloc[index]['start'] * self.sample_rate),
                                             int(self.sessions.iloc[index]['start'] + self.sessions.iloc[index]['duration'] * self.sample_rate) - self.sample_number)
-        sig, _ = soundfile.read(f"{self.data_path}/{self.sessions.iloc[index]['speaker_id']}/{self.sessions.iloc[index]['file_id']}{self.data_file_extension}",
+        sig, _ = soundfile.read(f"{self.data_path}/{self.sessions.iloc[index]['file_id']}{self.data_file_extension}",
                                 start=random_start,
                                 stop=random_start + self.sample_number
                                 )
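The changed read above drops the speaker_id sub-directory, so the waveform is now loaded directly from data_path/file_id. Below is a minimal, self-contained sketch of the random-crop logic this code relies on, assuming 'start' and 'duration' are stored in seconds and that the upper bound is intended to be (start + duration) * sample_rate; the explicit parentheses and the name read_random_crop are assumptions, not sidekit code.

import numpy
import soundfile


def read_random_crop(wav_path, start, duration, sample_rate, sample_number):
    # Earliest and latest frame at which a window of sample_number frames
    # still fits inside the [start, start + duration] segment.
    lower = int(start * sample_rate)
    upper = int((start + duration) * sample_rate) - sample_number
    random_start = numpy.random.randint(lower, upper)
    # soundfile.read accepts start/stop in frames, so only the crop is loaded.
    sig, _ = soundfile.read(wav_path,
                            start=random_start,
                            stop=random_start + sample_number)
    return sig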
@@ -147,7 +147,7 @@ class Xtractor(torch.nn.Module):
             self.feature_size = self.preprocessor.dimension
         """
-        Prepapre sequence network
+        Prepare sequence network
         """
         # Get Feature size
         if self.feature_size is None:
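For context, the surrounding logic takes the input feature size from the preprocessor when one is attached and otherwise determines it later. A minimal sketch of that fallback, assuming a configuration dict cfg with a "feature_size" entry (both names are hypothetical, not sidekit's actual attributes):

def resolve_feature_size(preprocessor, cfg):
    # Prefer the dimension exposed by the preprocessor; otherwise fall back
    # to the value declared in the model configuration.
    if preprocessor is not None:
        return preprocessor.dimension
    return cfg["feature_size"]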
@@ -264,12 +264,12 @@ def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', best_filename
 def xtrain(speaker_number,
            dataset_yaml,
-           epochs=10,
+           epochs=100,
            lr=0.01,
            model_yaml=None,
            model_name=None,
            tmp_model_name=None,
-           output_model_name=None,
+           best_model_name=None,
            num_thread=1):
     """
     Initialize and train an x-vector on a single GPU
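With the renamed keyword, a call to xtrain might look as follows; the YAML paths and model names below are placeholders, and only the keywords shown in the signature above are assumed:

xtrain(speaker_number=1000,
       dataset_yaml="dataset.yaml",
       epochs=100,
       lr=0.01,
       model_yaml="model.yaml",
       tmp_model_name="tmp_xvector",     # checkpoint written every epoch
       best_model_name="best_xvector",   # copy kept for the best epoch
       num_thread=5)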
@@ -277,7 +277,7 @@ def xtrain(speaker_number,
     :param args:
     :return:
     """
-    device = torch.device("cuda:0")
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
     # If we start from an existing model
     if model_name is not None:
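The device fallback above lets xtrain run on machines without a GPU. A minimal, self-contained sketch of the pattern (the model below is a placeholder standing in for the Xtractor instance built later in the function):

import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = torch.nn.Linear(30, 512)   # placeholder for the Xtractor model
model.to(device)                   # move parameters to GPU when available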
@@ -375,7 +375,7 @@ def xtrain(speaker_number,
                 'optimizer_state_dict': optimizer.state_dict(),
                 'accuracy': best_accuracy,
                 'scheduler': scheduler
-                }, is_best, filename = tmp_model_name+".pt", best_filename=output_model_name+'.pt')
+                }, is_best, filename = tmp_model_name+".pt", best_filename=best_model_name+'.pt')
                 if is_best:
                     best_accuracy_epoch = epoch
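The call above now writes the rolling checkpoint to tmp_model_name+".pt" and the best model to best_model_name+".pt". A minimal sketch of a save_checkpoint helper consistent with the signature shown in the hunk header, assuming the best model is kept by copying the latest checkpoint (the shutil copy is an assumption, not necessarily sidekit's implementation):

import shutil
import torch


def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', best_filename='model_best.pth.tar'):
    # Always persist the current state, then duplicate it when this epoch
    # reached the best accuracy so far.
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, best_filename)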