Commit 7af44ad4 authored by Félix Michaud

save prog

parent 7bb23c9d
@@ -67,8 +67,8 @@ def warpgrid(HO, WO, warp=True):
 def create_im(mag):
     magim = mag.unsqueeze(0).unsqueeze(0)
     grid_warp = torch.from_numpy(warpgrid(256, magim.shape[3], warp=True))
-    magim = F.grid_sample(magim, grid_warp)
-    return magim
+    magim = torch.log(F.grid_sample(magim, grid_warp))
+    return torch.from_numpy(np.flipud(magim).copy())

 class Dataset(data.Dataset):
     'Characterizes a dataset for PyTorch'
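For reference, a self-contained sketch of what the updated create_im appears to compute, assuming mag is a 2-D (freq x time) torch tensor and that warpgrid returns a float32 sampling grid of shape (1, 256, T, 2). The intermediate conversion to NumPy before np.flipud is my assumption, since np.flipud followed by torch.from_numpy expects a NumPy array rather than a torch tensor:

import numpy as np
import torch
import torch.nn.functional as F

def create_im_sketch(mag, warpgrid):
    # mag: (freq, time) magnitude spectrogram -> (1, 1, freq, time)
    magim = mag.unsqueeze(0).unsqueeze(0)
    # resample the frequency axis onto 256 bins (warpgrid assumed float32, shape (1, 256, T, 2))
    grid_warp = torch.from_numpy(warpgrid(256, magim.shape[3], warp=True))
    # log-magnitude of the warped spectrogram
    logmag = torch.log(F.grid_sample(magim, grid_warp))
    # assumption: flip the frequency axis in NumPy, then hand back a torch tensor
    flipped = np.flipud(logmag.squeeze(0).squeeze(0).detach().numpy()).copy()
    return torch.from_numpy(flipped)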
@@ -13,12 +13,10 @@ import fnmatch
 import csv
 import torch
 from arguments import ArgParser
-from Calls import CallDataset
-from unet import UNet6
+from unet import UNet
 import torch.nn as nn
 from tensorboardX import SummaryWriter
 from matplotlib import image as mpimg
-from EvalDataset import CallEVALDataset
+from Dataloader import Dataset
 import matplotlib
 import numpy as np
@@ -33,32 +31,6 @@ def create_list(path, ext):
     return list_names

-#def build_classes_dictionary(path, ext):
-#    dict_classes = collections.OrderedDict()
-#
-#    for root, dirnames, filenames in os.walk(path):
-#        for filename in fnmatch.filter(filenames, '*' + ext):
-#            classe = root.split("/")[-1]
-#            if classe in dict_classes.keys():
-#                dict_classes[classe].append(os.path.join(root, filename))
-#            else:
-#                dict_classes[classe] = [os.path.join(root, filename)]
-#    return dict_classes
-#
-#
-#def selec_classes(root, nb_classes, ext):
-#    list_subfolders = [f.path for f in os.scandir(root) if f.is_dir()]
-#    list_subfolders.sort()
-#    list_classes = []
-#    for ii in range(nb_classes):
-#        classes = collections.OrderedDict()
-#        samples = create_list(list_subfolders[ii], ext)
-#        name = list_subfolders[ii].rsplit('/', 1)[1]
-#        classes[ name ] = samples
-#        list_classes.append(classes)
-#    return list_classes

 def create_optimizer(nets, args):
     net_sound = nets
     param_groups = [{'params': net_sound.parameters(), 'lr': args.lr_sound}]
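The hunk stops before the optimizer itself is built. A minimal sketch of how such per-group learning rates are typically handed to a torch optimizer; the choice of Adam here is an assumption, not necessarily what create_optimizer actually returns:

import torch

def create_optimizer_sketch(net_sound, args):
    # one parameter group with its own learning rate, as in the diff above
    param_groups = [{'params': net_sound.parameters(), 'lr': args.lr_sound}]
    # assumption: the real optimizer class is not visible in this hunk
    return torch.optim.Adam(param_groups)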
@@ -91,14 +63,14 @@ def train(net, loader_train, optimizer, args):
 #        #writing of the Loss values and elapsed time for every batch
         batchtime = (time.time() - args.starting_training_time)/60 #minutes
 #        #Writing of the elapsed time and loss for every batch
-        with open("./losses/loss_train/loss_times4U6.csv", "a") as f:
+        with open("./losses/loss_train/loss_times5.csv", "a") as f:
             writer = csv.writer(f)
-            writer.writerow([str(loss.cpu().detach().numpy()), batchtime])
-        if batchtime%args.save_per_batchs == 0:
+            writer.writerow([str(loss.cpu().detach().numpy()), batchtime, num_batch])
+        if ii%args.save_per_batchs == 0:
             torch.save({
                 'model_state_dict': net.state_dict(),
                 'optimizer_state_dict': optimizer.state_dict()},
-                'Saved_models2/model4U6_batchs{}.pth.tar'.format(num_batch))
+                'Saved_models3/model5_batchs{}.pth.tar'.format(num_batch))

 #***************************************************
 #****************** MAIN ***************************
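The change in this hunk triggers checkpointing on the batch index ii instead of the elapsed minutes, and logs the running batch counter num_batch alongside the loss. A compact sketch of that pattern, with ii, num_batch and the file paths taken from the diff and everything else assumed:

import csv
import time
import torch

def log_and_checkpoint(net, optimizer, loss, ii, num_batch, args):
    # elapsed training time in minutes since args.starting_training_time
    batchtime = (time.time() - args.starting_training_time) / 60
    # append loss value, elapsed time and batch counter for this batch
    with open("./losses/loss_train/loss_times5.csv", "a") as f:
        csv.writer(f).writerow([str(loss.cpu().detach().numpy()), batchtime, num_batch])
    # save a checkpoint every args.save_per_batchs batches within the epoch
    if ii % args.save_per_batchs == 0:
        torch.save({'model_state_dict': net.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict()},
                   'Saved_models3/model5_batchs{}.pth.tar'.format(num_batch))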
@@ -110,13 +82,13 @@ if __name__ == '__main__':
     args.batch_size = 16
     args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
     args.starting_training_time = time.time()
-    args.save_per_batchs = 30
-    args.nb_classes = 3
+    args.save_per_batchs = 180
+    args.nb_classes = 2
     args.mode = 'train'
-    args.lr_sounds = 1e-4
+    args.lr_sounds = 1e-5
     args.saved_model = '5_5000'
     #model definition
-    net = UNet6(n_channels=1, n_classes=args.nb_classes)
+    net = UNet(n_channels=1, n_classes=args.nb_classes)
     net = net.to(args.device)
     # Set up optimizer
     optimizer = create_optimizer(net, args)
@@ -125,24 +97,26 @@ if __name__ == '__main__':
     ###########################################################
     if args.mode == 'train':
         #OverWrite the Files for loss saving and time saving
-        fichierLoss = open("./losses/loss_train/loss_times4U6.csv", "w")
+        fichierLoss = open("./losses/loss_train/loss_times5.csv", "w")
         fichierLoss.close()
         #Dataset loading
-        root = './data/classes_train/'
+        root = './data_sound/trainset/'
         ext = '.wav'
-        train_classes = Dataset(root,nb_classes=args.nb_classes, path_background="./data/noises/")
+        train_classes = Dataset(root, nb_classes=args.nb_classes, path_background="./data_sound/noises/")
+        print('apres dataset')
         loader_train = torch.utils.data.DataLoader(
             train_classes,
             batch_size = args.batch_size,
             shuffle=True,
-            num_workers=6)
+            num_workers=20)
-        for epoch in range(0, 2):
+        print('avant epoch')
+        for epoch in range(0, 1):
             train(net, loader_train, optimizer, args)
             torch.save({
                 'model_state_dict': net.state_dict(),
                 'optimizer_state_dict': optimizer.state_dict()},
-                'Saved_models2/model4U6epoch{}.pth.tar'.format(epoch))
+                'Saved_models3/model5epoch{}.pth.tar'.format(epoch))
 ###########################################################
 ################### EVALUATION ############################
 ###########################################################
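The EVALUATION block itself is not part of this commit's visible hunks. For completeness, a checkpoint written with the dictionary above can be restored roughly as follows; the file name, loosely assembled here from args.saved_model = '5_5000', is a guess and purely illustrative:

import torch
from unet import UNet

# hypothetical checkpoint path; the real evaluation code may build it differently
checkpoint = torch.load('Saved_models3/model5_batchs5000.pth.tar', map_location='cpu')
net = UNet(n_channels=1, n_classes=2)
net.load_state_dict(checkpoint['model_state_dict'])
net.eval()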