Commit a45c85e5 authored by Hubert Nourtel

Merge remote-tracking branch 'origin/main' into main

# Conflicts:
#	egs/iemocap/config/custom/model.py
#	egs/iemocap/config/custom/model.yaml
parents c7235553 505d4158
@@ -4,21 +4,21 @@
# General options
data_path: /
data_file_extension: .wav
dataset_csv: list/iemocap_ses1-test.csv
dataset_csv: list/iemocap_ses3-test.csv
sample_rate: 16000
validation_ratio: 0.02
batch_size: 4
batch_size: 100
# Training set
train:
    duration: 3.
    duration: -1 #3.
    chunk_per_segment: -1
    overlap: 3.
    sampler:
        examples_per_speaker: 1
        examples_per_speaker: 25
        samples_per_speaker: 100
        augmentation_replica: 1
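# (presumably: duration -1 uses each IEMOCAP segment at its full length instead of 3 s crops,
# and 25 examples per "speaker" -- here an emotion class -- times 4 classes fills the batch of 100)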
@@ -38,7 +38,7 @@ train:
# Validation set
valid:
    duration: 3.
    duration: -1 #3.
    transformation:
        pipeline: # no transformation
@@ -21,7 +21,7 @@ session_test = args.session_test
lr = args.lr
tmp = "model_custom/tmp_custom_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt".format(nb_cate, batch, lr, session_test)
best = "model_custom/best_custom_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt".format(nb_cate, batch, lr, session_test)
log = "logs/half_resnet34_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.log".format(nb_cate, batch, lr, session_test)
log = "logs/custom_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.log".format(nb_cate, batch, lr, session_test)
## Modification of variables in YAML files
# model.yaml
......@@ -31,11 +31,9 @@ with open("config/custom/model.yaml") as fp:
    data = yaml.load(fp)
data['speaker_number'] = int(nb_cate)
with open("config/custom/model.yaml", 'w') as fp:
    yaml.dump(data, fp)
# training.yaml
with open("config/custom/training.yaml") as fp:
    data = yaml.load(fp)
......@@ -57,3 +55,4 @@ data["dataset_csv"] = "list/iemocap_ses{}-test.csv".format(session_test)
with open("config/custom/Iemocap.yaml", 'w') as fp:
    yaml.dump(data, fp)
print("YAML files modified")
# Training description
# General options
log_file: logs/custom.log
log_file: logs/half_resnet34_4emo_100batch_lr-0.0001_Test-IEMOCAP3.log
torch_seed: 42
numpy_seed: 42
random_seed: 42
@@ -35,6 +35,6 @@ log_interval: 50
validation_frequency: 1
# Save options
tmp_model_name: model_custom/tmp_custom_4emo_4batch_lr-0.0001_Test-IEMOCAP1.pt
best_model_name: model_custom/best_custom_4emo_4batch_lr-0.0001_Test-IEMOCAP1.pt
tmp_model_name: model_custom/tmp_custom_4emo_100batch_lr-0.0001_Test-IEMOCAP3.pt
best_model_name: model_custom/best_custom_4emo_100batch_lr-0.0001_Test-IEMOCAP3.pt
checkpoint_frequency:
@@ -4,7 +4,7 @@
# General options
data_path: / # path to prepend to each wav listed in list/voxceleb2.csv
data_file_extension: .wav
dataset_csv: list/iemocap_ses2-test.csv
dataset_csv: list/iemocap_ses1-test.csv
sample_rate: 16000
......@@ -32,11 +32,9 @@ with open("config/half_resnet34/model.yaml") as fp:
    data = yaml.load(fp)
data['speaker_number'] = int(nb_cate)
with open("config/half_resnet34/model.yaml", 'w') as fp:
    yaml.dump(data, fp)
# training.yaml
with open("config/half_resnet34/training.yaml") as fp:
    data = yaml.load(fp)
......@@ -58,3 +56,4 @@ data["dataset_csv"] = "list/iemocap_ses{}-test.csv".format(session_test)
with open("config/half_resnet34/Iemocap.yaml", 'w') as fp:
    yaml.dump(data, fp)
print("YAML files modified")
# Training description
# General options
log_file: logs/half_resnet34_4emo_4batch_lr-0.001_Test-IEMOCAP2.log
log_file: logs/half_resnet34_4emo_4batch_lr-0.1_Test-IEMOCAP1.log
torch_seed: 42
numpy_seed: 42
random_seed: 42
deterministic: false
epochs: 100
lr: 0.001
lr: 0.1
patience: 30
multi_gpu: false
@@ -35,6 +35,6 @@ log_interval: 461
validation_frequency: 1
# Save options
tmp_model_name: model_half_resnet34/tmp_half_resnet34_4emo_4batch_lr-0.001_Test-IEMOCAP2.pt
best_model_name: model_half_resnet34/best_half_resnet34_4emo_4batch_lr-0.001_Test-IEMOCAP2.pt
tmp_model_name: model_half_resnet34/tmp_half_resnet34_4emo_4batch_lr-0.1_Test-IEMOCAP1.pt
best_model_name: model_half_resnet34/best_half_resnet34_4emo_4batch_lr-0.1_Test-IEMOCAP1.pt
checkpoint_frequency:
@@ -4,53 +4,53 @@
# General options
data_path: / # path to prepend to each wav listed in list/voxceleb2.csv
data_file_extension: .wav
dataset_csv: list/voxceleb2.csv
dataset_csv: list/iemocap_ses1-test.csv
sample_rate: 16000
validation_ratio: 0.0
batch_size: 192
batch_size: 4
# Training set
train:
    duration: 3
    chunk_per_segment: -1
    overlap: 3
    duration: 3
    chunk_per_segment: -1
    overlap: 3
    sampler:
        examples_per_speaker: 1
        samples_per_speaker: 192
        augmentation_replica: 1
    sampler:
        examples_per_speaker: 1
        samples_per_speaker: 192
        augmentation_replica: 1
    transform_number: 1
    transform_number: 1
    transformation:
        pipeline: add_reverb,add_noise,phone_filtering,codec
    transformation:
        pipeline: add_reverb,add_noise,phone_filtering,codec
        add_noise:
            noise_db_csv: list/musan.csv
            data_path: /
        add_noise:
            noise_db_csv: list/musan.csv
            data_path: /
        add_reverb:
            rir_db_csv: list/reverb.csv
            data_path: /
        add_reverb:
            rir_db_csv: list/reverb.csv
            data_path: /
# Validation set
valid:
    duration: 3
    duration: 3
    transformation:
        pipeline: # no transformation
    transformation:
        pipeline: # no transformation
        add_noise:
            noise_db_csv: list/musan.csv
            data_path: /
        add_noise:
            noise_db_csv: list/musan.csv
            data_path: /
# Test set (set 'compute_test_eer' to true in training.yaml)
test:
    idmap: ./list/asv_test/voxceleb1-O-clean_idmap.h5
    ndx: ./list/asv_test/voxceleb1-O-clean_ndx.h5
    key: ./list/asv_test/voxceleb1-O-clean_key.h5
    data_path: .
    id2wav: ./data/asv_test_voxceleb1/voxceleb1-O-clean.id2wav
    idmap: ./list/asv_test/voxceleb1-O-clean_idmap.h5
    ndx: ./list/asv_test/voxceleb1-O-clean_ndx.h5
    key: ./list/asv_test/voxceleb1-O-clean_key.h5
    data_path: .
    id2wav: ./data/asv_test_voxceleb1/voxceleb1-O-clean.id2wav
@@ -5,7 +5,7 @@ speaker_number: 4
embedding_size: 256
loss:
    type: circle
    type: circle
# Initialize model from file, reset and freeze parts of it
initial_model_name: /srv/storage/talc@talc-data.nancy/multispeech/calcul/users/pchampion/lab/model_share/best_vox2_wavlm.pt
......@@ -31,11 +31,9 @@ with open("config/wavlm_ecapa_circle/model.yaml") as fp:
    data = yaml.load(fp)
data['speaker_number'] = int(nb_cate)
with open("config/wavlm_ecapa_circle/model.yaml", 'w') as fp:
    yaml.dump(data, fp)
# training.yaml
with open("config/wavlm_ecapa_circle/training.yaml") as fp:
    data = yaml.load(fp)
......@@ -57,3 +55,4 @@ data["dataset_csv"] = "list/iemocap_ses{}-test.csv".format(session_test)
with open("config/wavlm_ecapa_circle/Iemocap.yaml", 'w') as fp:
    yaml.dump(data, fp)
print("YAML files modified")
# Training description
# General options
log_file: logs/ecapa_wavlm.log
log_file: logs/half_resnet34_4emo_4batch_lr-0.1_Test-IEMOCAP1.log
torch_seed: 42
numpy_seed: 42
random_seed: 42
deterministic: false
epochs: 100
lr: 0.001
lr: 0.1
patience: 100
multi_gpu: false
@@ -19,14 +19,14 @@ clipping: false
# Optimizer and scheduler options
optimizer:
    type: adamw
    options:
    type: adamw
    options:
scheduler:
    type: OneCycleLR
    options:
        epochs: 8
        steps_per_epoch: 5994
    type: OneCycleLR
    options:
        epochs: 8
        steps_per_epoch: 5994
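# (OneCycleLR derives its total step count from epochs * steps_per_epoch, so these two values
# should match the actual length of training for the one-cycle schedule to complete)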
# Evaluation options
@@ -35,6 +35,6 @@ log_interval: 461
validation_frequency: 1
# Save options
tmp_model_name: model_wavlm_ecapa_circle/tmp_model_wavlm.pt
best_model_name: model_wavlm_ecapa_circle/best_model_wavlm.pt
tmp_model_name: model_wavlm_ecapa_circle/tmp_wavlm_ecapa_circle_4emo_4batch_lr-0.1_Test-IEMOCAP1.pt
best_model_name: model_wavlm_ecapa_circle/best_wavlm_ecapa_circle_4emo_4batch_lr-0.1_Test-IEMOCAP1.pt
checkpoint_frequency:
import torch
from tqdm import tqdm
import os
import re
import seaborn as sns
import pandas as pd
import numpy as np
from math import sqrt
import torchaudio
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from sidekit.nnet.xvector import Xtractor
import argparse
@@ -52,7 +55,11 @@ ses_nb = args.session_test
labels = list(args.emotions.split(" "))
nb_batch = str(args.batchs)
model_type = args.model
lr = str(args.lr)
if "-" in model_type:
model_type, model_2nd = model_type.split("-")[0], "-" + model_type.split("-")[1]
else:
model_2nd = ""
lr = str(float(args.lr))
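# e.g. a hypothetical --model value "custom-circle" splits into model_type "custom" and
# model_2nd "-circle"; str(float(args.lr)) presumably normalises the lr string ("1e-4" -> "0.0001")
# so it matches the checkpoint and log names written at training time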
cates = str(args.categories)
if args.freeze is not None:
    freeze = "_freeze"
@@ -64,6 +71,7 @@ else:
xtract, config = load_model("model_{}/best_{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt"
                    .format(model_type, model_type, cates, nb_batch, lr, ses_nb), "cuda")
model_2nd += "_{}".format(config["model_archi"]["loss"]["type"])
predictions = []
gold_anno = []
path = "data/IEMOCAP/Session{}/sentences/wav".format(ses_nb)
@@ -81,7 +89,8 @@ index.close()
dic_index_emo = {}
# Needed when the model_type == custom
dico = {}
if model_type == "custom":
dico = {}
for elmt in index_emo:
dic_index_emo[int(elmt.split(": ")[1].replace("\n", ""))] = elmt.split(": ")[0]
@@ -114,30 +123,33 @@ assert len(predictions) == len(gold_anno)
# We start to compare the predictions and gold_anno lists
UAR = metrics.recall_score(gold_anno, predictions, average="macro")
UARPercent = round(UAR * 100, 2)
print("\nUAR:", UARPercent, "%\n")
p = round(UAR, 2) # For the confidence interval
confMatrix = metrics.confusion_matrix(gold_anno, predictions)
print(confMatrix, "\n")
confMatrix = metrics.confusion_matrix(gold_anno, predictions, labels=labels)
print(confMatrix)
gold_dic = []
gold_dic = {key: 0 for key in labels}
dico = {key: {k: 0 for k in labels} for key in labels}  # one independent counter dict per gold label
for i in range(len(confMatrix)):
    gold_dic.append(sum(confMatrix[i]))
for gold, pred in zip(gold_anno, predictions):
    dico[gold][pred] += 1
    gold_dic[gold] += 1
n = sum(gold_dic.values()) # For the confidence interval
[print("Total", key, ":", value) for key, value in gold_dic.items()]
conf_inter = round(1.96*(sqrt((p*(1-p))/n)), 2) # 95% confidence interval
UARPercent = round(UAR * 100, 2)
print("\nUAR:", UARPercent, "% ±", conf_inter, "\n")
[print("Total", labels[i], ":", gold_dic[i]) for i in range(len(labels))]
print("Total:", n)
annot = []
em = [value for value in gold_dic.values()]
for i in range(0, len(confMatrix)): # row
    annot.append([])
    tot = 0
    for j in range(0, len(confMatrix[i])): # column
        nbr = confMatrix[i][j]
        percent = round(nbr/em[i], 2)*100
        percent = round(nbr/gold_dic[i], 2)*100
        tot += percent
        if j == len(confMatrix[i])-1:
            if tot > 100:
@@ -154,17 +166,22 @@ fil.close()
# Search for "Loss:", "Validation Loss", "Epoch" and "reducing" in all lines
valid_loss = [line for line in file if "Validation Loss" in line]
lr_scheduler = [float(line.rsplit(":",1)[1].replace("\n","")) for line in file if "Scheduler" in line]
# "Loss:" and "Epoch" in same line
if model_type == "custom":
loss_epoch = [line for line in file if "Epoch" in line]
loss_epoch = loss_epoch[::2]
#loss_epoch = loss_epoch[::2] # 2 because only 1 gpu used: if more gpus are used, you have to do 2*nb_gpus
loss_epoch = [line for line in loss_epoch if "[1/" in line]
elif model_type == "half_resnet34":
loss_epoch = [line for line in file if "Epoch" in line][1:]
else:
loss_epoch = [line for line in file if "Epoch" in line][2:]
loss_epoch = [line for line in file if "Epoch" in line]
reduce_lr = [line for line in file if "reducing" in line] # Can be empty
reduce_lr_list = [line for line in file if "reducing" in line] # Can be empty
vloss = [] # Validation losses
vacc = [] # Validation accuracy
tloss = [] # Training losses
aepoch = [] # Number of epochs
@@ -172,70 +189,100 @@ for linev, linel in zip(valid_loss, loss_epoch):
linel = linel.split(":")
linev = linev.split("=")
vloss.append(round(float(linev[3].replace("\n", "").replace(" ", "")), 2))
vacc.append(round(float(linev[1].split("%")[0].replace(" ","")),2))
aepoch.append(linel[3].split(" ")[1])
tloss.append(round(float(linel[4].split("\t")[0].replace(" ", "")), 2))
assert len(aepoch) == len(tloss) == len(vloss)
print("--------------")
for e, t, v in zip(aepoch, tloss, vloss):
print("Epoch:", e, "\ttrain loss:", t, "\tvalid loss:", v)
assert len(aepoch) == len(tloss) == len(vloss) == len(vacc)
print("--------------\n")
for e, t, v ,a in zip(aepoch, tloss, vloss, vacc):
print("Epoch:", e, "\ttrain loss:", t, "\tvalid loss:", v, "\tvalid acc.:", a)
### Plotting of confusion matrix and losses ###
path = "model_{}/Sess{}_test/{}emo_{}batch_lr-{}{}".format(model_type, ses_nb, cates, nb_batch, lr, freeze)
if model_type == "custom":
path = "model_{}/Sess{}_test/{}/{}emo_{}batch_lr-{}{}".format(model_type, ses_nb, model_2nd[1:] ,cates, nb_batch, lr, freeze)
if not os.path.isdir(path.rsplit("/",2)[0]):
os.mkdir(path.rsplit("/",2)[0])
else:
path = "model_{}/Sess{}_test/{}emo_{}batch_lr-{}{}".format(model_type, ses_nb, cates, nb_batch, lr, freeze)
if not os.path.isdir(path.rsplit("/", 1)[0]):
os.mkdir(path.rsplit("/", 1)[0])
if not os.path.isdir(path):
os.mkdir(path)
# Plot confusion matrix
## Plot confusion matrix ##
sns.heatmap(confMatrix, annot=annot, fmt="10", cmap="Blues", vmin=0, vmax=350, xticklabels=labels, yticklabels=labels)
plt.title("Model: " + str(config["model_archi"]["model_type"]) + "{}_".format(freeze) + str(config["speaker_number"]) +
          "emo_{}batch\nepoch: {} lr: {} Data: Test-IEMOCAP {}".format(nb_batch, aepoch[-1], lr, ses_nb) +
          " UAR = " + str(UARPercent) + "%")
plt.title("Model: {}{}{}_".format(model_type, model_2nd, freeze) + str(config["speaker_number"]) + "emo_{}batch\nepoch: {} lr: {} Data: Test-IEMOCAP {}".format(nb_batch, aepoch[-1], lr, ses_nb) + " UAR = " + str(UARPercent) + "%")
plt.xlabel("Prediction")
plt.ylabel("Ground truth")
plt.savefig(os.path.join(path, "confusion_matrix_{}{}_".format(model_type, freeze) + str(config["speaker_number"]) +
            "emo_{}batch_epoch-{}_lr-{}_Test-IEMOCAP{}.png".format(nb_batch, aepoch[-1], lr, ses_nb)))
plt.savefig(os.path.join(path, "confusion_matrix_{}{}{}_".format(model_type, model_2nd, freeze) + str(config["speaker_number"]) + "emo_{}batch_epoch-{}_lr-{}_Test-IEMOCAP{}.png".format(nb_batch, aepoch[-1], lr, ses_nb)))
plt.show()
plt.clf()
print("\nConfusion matrix done!")
# Plot losses
ticks = [nb for nb in range(0, 9)]
eticks = [nb for nb in range(0, len(aepoch), 5)]
plt.plot(aepoch, tloss, label="Training loss")
plt.plot(aepoch, vloss, label="Validation loss")
plt.yticks(ticks)
plt.xticks(eticks)
if len(reduce_lr) != 0:
    colors = ["b", "g", "y", "c", "m", "r"]
print("\nConfusion matrix plotted!")
## Plot losses ##
fig, axs = plt.subplots(2, 1, constrained_layout=True)
ax = axs[0]
twin = ax.twinx()
ax2 = axs[1]
# 1st subplot #
plot_tl, = ax.plot(tloss, label="Training loss")
plot_vl, = ax.plot(vloss, label="Validation loss")
plot_vac, = twin.plot(vacc, "g-.", label="Validation accuracy")
handles = [plot_tl, plot_vl, plot_vac] # For the legend
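# ax.plot and twin.plot return lists of Line2D objects; the trailing comma unpacks the single
# line from each call so all three handles can be passed to one legend spanning both y-axes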
# Change of learning rate during the training (may be absent)
if len(reduce_lr_list) != 0:
    reduce_lr_list = [re.sub(r"\s+", " ", elmt) for elmt in reduce_lr_list]
    reduce_lr_list = [elmt.split(" ") for elmt in reduce_lr_list]
    reduce_lr_list = {elmt[1][:-1]: elmt[-1][:-1] for elmt in reduce_lr_list}
    colors = ["b", "m", "y", "c", "g", "r"]
    i = 0
    for key, value in reduce_lr_list.items():
        label = "lr: " + str(value)
        plt.axvline(x=key, color=colors[i], linestyle='--', label=label)
        handles.append(plt.axvline(x=key, color=colors[i], linestyle='--', label=label))
        i += 1
plt.legend()
plt.title("Model: {}{}_{}emo_{}batch\nEpoch: {} lr: {} Data: Test-IEMOCAP {}".format(model_type, freeze, cates,
nb_batch, aepoch[-1], lr, ses_nb)
)
plt.savefig(os.path.join(path, "losses_{}{}_{}emo_{}batch_epoch-{}_lr-{}_Test-IEMOCAP{}.png".format(model_type, freeze,
cates, nb_batch,
aepoch[-1], lr,
ses_nb)))
# 2nd subplot #
plot_lr = ax2.plot(lr_scheduler, "m")
maxlr, minlr = max(lr_scheduler), min(lr_scheduler) # Used for legend
maxlr_index, minlr_index = lr_scheduler.index(maxlr), lr_scheduler.index(minlr) # Used for axvline
lr_max = ax2.axvline(x=maxlr_index, color="r", linestyle='--', label="Max: {}".format(format(maxlr, ".1e")))
lr_min = ax2.axvline(x=minlr_index, color="b", linestyle='--', label="Min: {}".format(format(minlr, ".1e")))
handles_lr = [lr_max, lr_min] # For the legend
ax2.legend(handles=handles_lr) # Show the legend
# General settings #
# 1st subplot #
ax.set_xlabel("Epochs")
ax.set_ylabel("CrossEntropyLoss()")
twin.set_ylabel("Validation accuracy (%)")
twin.set_ylim(0, 100)
ax.set_ylim(0,max(vloss)+1)
ax.set_xlim(0, len(aepoch))
ax.legend(handles=handles)
# 2nd subplot #
ax2.set_title("Scheduler learning rate")
ax2.get_xaxis().set_visible(False) # Do not show the x axis
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.e')) # Format of the y ticks
# Figure #
plt.title("Model: {}{}{}_{}emo_{}batch\nEpoch: {} lr: {} Data: Test-IEMOCAP {}".format(model_type, model_2nd, freeze, cates, nb_batch, aepoch[-1], lr, ses_nb))
plt.savefig(os.path.join(path, "losses_{}{}{}_{}emo_{}batch_epoch-{}_lr-{}_Test-IEMOCAP{}.png".format(model_type, model_2nd, freeze, cates, nb_batch, aepoch[-1], lr, ses_nb)))
plt.show()
plt.clf()
print("Losses plotted!\n")
### We move confusion matrix and logs to another folder ###
os.replace("model_{}/best_{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt".format(model_type, model_type, cates, nb_batch, lr,
ses_nb),
os.path.join(path, "best_{}{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt".format(model_type, freeze, cates,
nb_batch, lr, ses_nb)))
os.replace("model_{}/tmp_{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt".format(model_type, model_type, cates, nb_batch, lr,
ses_nb),
os.path.join(path, "tmp_{}{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt".format(model_type, freeze, cates,
nb_batch, lr, ses_nb)))
os.replace("logs/{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.log".format(model_type, cates, nb_batch, lr, ses_nb),
os.path.join(path, "{}{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.log".format(model_type, freeze, cates,
nb_batch, lr, ses_nb)))
os.replace("model_{}/best_{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt".format(model_type, model_type, cates, nb_batch, lr, ses_nb), os.path.join(path, "best_{}{}{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt".format(model_type, model_2nd, freeze, cates, nb_batch, lr, ses_nb)))
os.replace("model_{}/tmp_{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt".format(model_type, model_type, cates, nb_batch, lr, ses_nb), os.path.join(path, "tmp_{}{}{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.pt".format(model_type, model_2nd, freeze, cates, nb_batch, lr, ses_nb)))
os.replace("logs/{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.log".format(model_type, cates, nb_batch, lr, ses_nb), os.path.join(path, "{}{}{}_{}emo_{}batch_lr-{}_Test-IEMOCAP{}.log".format(model_type, model_2nd, freeze, cates, nb_batch, lr, ses_nb)))
@@ -2,6 +2,7 @@ import torch
from tqdm import tqdm
import os
import seaborn as sns
from math import sqrt
import pandas as pd
import numpy as np
import torchaudio
@@ -25,7 +26,12 @@ args = parser.parse_args()
labels = list(args.emotions.split(" "))
nb_batch = str(args.batchs)
model_type = args.model
lr = str(args.lr)
if "(" in model_type:
model_type, model_2nd = model_type.split("(")[0], "-" + model_type.split("(")[1]
else:
model_2nd = ""
lr = str(float(args.lr))
cates = str(args.categories)
if args.freeze is not None:
freeze = "_freeze"
@@ -33,7 +39,14 @@ else:
freeze = ""
# Path to save the confusion matrix for cross-validation
path = "model_{}/Sess_all_cross-valid/{}emo_{}batch_lr-{}{}".format(model_type, cates, nb_batch, lr, freeze)
if model_type == "custom":
path = "model_{}/Sess_all_cross-valid/{}/{}emo_{}batch_lr-{}{}".format(model_type, model_2nd[1:] ,cates, nb_batch, lr, freeze)
if not os.path.isdir(path.rsplit("/", 2)[0]):
os.mkdir(path.rsplit("/", 2)[0])
else:
path = "model_{}/Sess_all_cross-valid/{}emo_{}batch_lr-{}{}".format(model_type, cates, nb_batch, lr, freeze)
if not os.path.isdir(path.rsplit("/", 1)[0]):
os.mkdir(path.rsplit("/", 1)[0])
if not os.path.isdir(path):
@@ -73,7 +86,9 @@ index = open("list/emos_index.txt", "r")
index_emo = index.readlines()