Commit 512a4b09 authored by Mano Brabant

First version of the benchmark tool

parent a6ece072
@@ -10,9 +10,10 @@ def parse():
     # #name for user #name for program #type #default values #explanation sentences
     parser.add_argument('--input_dir', dest='input_dir', type=str, default='./PyTorchCheckpoint/', help='directory of saved checkpoints for denoising operation or retraining')
+    parser.add_argument('--output_dir', dest='output_dir', type=str, default=None, help='directory of saved checkpoints for denoising operation or retraining')
-    parser.add_argument('--train_dir', dest='train_dir', type=str, default='./Holography/DATABASE/', help='directory of training database')
+    parser.add_argument('--train_dir', dest='train_dir', type=str, default='./Holography/HOLODEEPmat/DATABASE/', help='directory of training database')
-    parser.add_argument('--eval_dir', dest='eval_dir', type=str, default='./Holography/DATABASE/', help='directory of training database')
+    parser.add_argument('--eval_dir', dest='eval_dir', type=str, default='./Holography/HOLODEEPmat/DATABASE/', help='directory of evaluation database')
     parser.add_argument('--test_dir', dest='test_dir', type=str, default='./Holography/DATAEVAL/DATAEVAL/', help='directory of testing database')
     parser.add_argument('--save_test_dir', dest='save_test_dir', type=str, default='./TestImages/', help='directory where results of de-noising operation will be saved')
@@ -26,8 +27,8 @@ def parse():
     parser.add_argument('--test_patterns', dest='test_patterns', type=int, nargs='+', default=(1, 2, 3, 4, 5), help='patterns used for testing')
     parser.add_argument('--test_noises', dest='test_noises', type=str, default="0-1-1.5-2-2.5", help='noise levels used for testing ')
-    parser.add_argument('--clean_train', dest='clean_train', type=str, default='data1/img_clean_train_1-2-3-4-5_0-1-1.5-2-2.5_two_50_50_384.npy', help='filepath of noise free file for training')
+    parser.add_argument('--clean_train', dest='clean_train', type=str, default='data1/img_clean_train_1_0_two_50_50_3.npy', help='filepath of noise free file for training')
-    parser.add_argument('--noisy_train', dest='noisy_train', type=str, default='data1/img_noisy_train_1-2-3-4-5_0-1-1.5-2-2.5_two_50_50_384.npy', help='filepath of noisy file for training')
+    parser.add_argument('--noisy_train', dest='noisy_train', type=str, default='data1/img_noisy_train_1_0_two_50_50_3.npy', help='filepath of noisy file for training')
     parser.add_argument('--clean_eval', dest='clean_eval', type=str, default='data1/img_clean_train_1-2-3_0-1-1.5two.npy', help='filepath of noise free file for eval')
     parser.add_argument('--noisy_eval', dest='noisy_eval', type=str, default='data1/img_noisy_train_1-2-3_0-1-1.5two.npy', help='filepath of noisy file for eval')
@@ -57,6 +58,7 @@ def parse():
     parser.add_argument('--use_gpu', dest='use_gpu', type=int, default=1, help='gpu flag, 1 for GPU and 0 for CPU')
     parser.add_argument('--checkpoint_dir', dest='ckpt_dir', type=str, default='./checkpoint', help='models are saved here')
+    parser.add_argument('--ckpt_dir', dest='ckpt_dir', type=str, default='./checkpoint', help='models are saved here')
     parser.add_argument('--sample_dir', dest='sample_dir', type=str, default='./sample', help='sample are saved here')
     #parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test sample are saved here')
@@ -91,7 +93,7 @@ def parse():
     parser.add_argument('--perform_validation', dest='perform_validation', action="store_true")
-    parser.add_argument('--scales', dest='scales', type=int, nargs='+', default=[1], help='size of test images')
+    parser.add_argument('--scales', dest='scales', type=int, nargs='+', default=(1), help='size of test images')
     parser.add_argument('--originalsize', dest='originalsize', type=int, nargs='+', default=(1024, 1024), help='size of test images')
     return parser.parse_args()
...
import numpy
import time
import datetime
import itertools
import subprocess
import os
import fnmatch
from statistics import mean
import csv

from argument import *
from utils import *

os.environ['MKL_THREADING_LAYER'] = 'GNU'

import random
random.seed(10)

# Baseline parameters: the defaults parsed from argument.parse(), which the
# benchmark overrides with the values read from its csv configuration file.
valid_args = vars(parse())
class Report(object):
    """
    This class represents a report for a benchmark.
    """

    def __init__(self, benchmark):
        """
        This constructor creates a new report for a benchmark.

        :param benchmark: The benchmark from which the report will be made
        """
        self.res_psnr = []
        self.res_std = []
        self.benchmark = benchmark
    def make_report(self):
        """
        This method gathers the PSNR and STD values from the *.res files
        produced by the tests and prints their averages.
        """
        input_dir = self.benchmark.getInputDir() + "/Test/"
        print(input_dir)
        for root, dirs, files in os.walk(input_dir):
            for file in fnmatch.filter(files, "*.res"):
                print(file)
                with open(os.path.join(root, file), "r") as f:
                    lines = f.read()
                    self.res_psnr.append(float(list(filter(lambda string: string.startswith('psnr'), lines.split('\n')))[0].split(':')[1]))
                    self.res_std.append(float(list(filter(lambda string: string.startswith('std'), lines.split('\n')))[0].split(':')[1]))
        print(input_dir)
        print("Average psnr : ", mean(self.res_psnr))
        print("Average std : ", mean(self.res_std))
        print("\n\n\n\n\n")
    def toCSV(self):
        """
        This method writes the averaged results of the benchmark back into its
        csv file.
        """
        if(len(self.res_psnr) == 0):
            self.make_report()
        mylist = [[]]
        with open(self.benchmark.file_path, 'r', newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=';')
            mylist = list(reader)
        print(mylist)
        print(mylist[0])
        print(mylist[2])
        mylist[0][0] = self.benchmark.input_dir
        mylist[2][1] = "1"
        mylist[3][1] = "1"
        mylist[4][1] = mean(self.res_std)
        mylist[4][2] = mean(self.res_psnr)
        with open(self.benchmark.file_path, 'w', newline='') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=';',
                                    quotechar='|', quoting=csv.QUOTE_MINIMAL)
            spamwriter.writerows(mylist)
class Benchmark(object):
    """
    This class is a tool for running benchmarks.
    """

    def __init__(self, file_path):
        """
        This constructor creates a new benchmark from a csv file.

        :param file_path: The path to the csv file
        """
        self.input_dir = 'Benchmark/benchmark_{}'.format(datetime.datetime.now().strftime("%d_%m_%Y-%H:%M:%S"))
        self.file_path = file_path
        self.params = dict()
    def getParam(self):
        """
        This method reads the parameters to change from the csv file and
        merges them with the default arguments.
        """
        for arg in valid_args:
            if(isinstance(valid_args[arg], tuple)):
                self.params[arg] = ' '.join([str(i) for i in valid_args[arg]])
            else:
                self.params[arg] = valid_args[arg]
        with open(self.file_path, 'r', newline='') as csvfile:
            spamreader = csv.reader(csvfile, delimiter=';', quotechar='|')
            for row in spamreader:
                if(len(row) >= 2 and row[0] in valid_args):
                    self.params[row[0]] = row[1]
        self.params['input_dir'] = self.input_dir
        self.params['save_test_dir'] = '.'
        print(self.params)
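    # Illustrative csv rows that getParam above would pick up: any row whose
    # first cell matches an argument name overrides that argument. The names
    # and values below are hypothetical, not part of the shipped res_brut.csv:
    #   num_epochs;5
    #   train_patterns;1 2 3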
    def getNbTest(self):
        """
        This method returns the number of tests launched by the benchmark.
        """
        return 1

    def getInputDir(self):
        """
        This method returns the benchmark working directory.
        """
        return '{}'.format(self.input_dir)
    def toString(self):
        """
        This method returns a textual representation of the benchmark.
        """
        return """
        Benchmark :
        {}
        """.format(self.params)

    def get_params_string(self):
        """
        This method joins all the parameters of the benchmark into a single
        command-line string.
        """
        temp = ' '.join(["--" + k + " " + str(v) if v != None and v != '' and v != False else "" for k, v in self.params.items()])
        return temp
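    # Example of the kind of string get_params_string produces (values are
    # illustrative): "--input_dir Benchmark/benchmark_... --num_epochs 5
    # --train_patterns 1 2 3 ...". Parameters whose value is None, '' or False
    # are replaced by an empty string, so the called scripts keep their defaults.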
    def launch_benchmark_data(self):
        """
        This method launches the creation of the data configuration for the
        different tests (just one for the moment).
        """
        process = []
        list_params = self.get_params_string()
        cmd = '''
        python3 generate_patches_holo_fromMAT.py {} &
        '''.format(list_params).replace("\n", "")
        p = subprocess.Popen(cmd, shell=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        process.append(p)
        p.communicate()
        exit_codes = [p.communicate() for p in process]

        sess_name = extract_sess_name(tuple(self.params['train_patterns'].split(" ")), self.params['train_noises'], self.params['phase_type'], self.params['stride'], self.params['patch_size'], self.params['patch_per_image']).replace(' ', '')
        self.params['clean_train'] = os.path.join(self.params['save_dir'], "img_clean_train_" + sess_name + ".npy")
        self.params['noisy_train'] = os.path.join(self.params['save_dir'], "img_noisy_train_" + sess_name + ".npy")

        sess_name = extract_sess_name(tuple(self.params['eval_patterns'].split(" ")), self.params['eval_noises'], self.params['phase_type'], self.params['stride'], self.params['patch_size'], self.params['patch_per_image']).replace(' ', '')
        self.params['clean_eval'] = os.path.join(self.params['save_dir'], "img_clean_train_" + sess_name + ".npy")
        self.params['noisy_eval'] = os.path.join(self.params['save_dir'], "img_noisy_train_" + sess_name + ".npy")

        print(self.params)
        #print("exit_codes :", exit_codes)
    def launch_benchmark_training(self):
        """
        This method launches the training for the different configurations
        (just one for the moment).
        """
        process = []
        input_dir = self.getInputDir()
        output_dir = '/'
        list_params = self.get_params_string()
        cmd = '''
        python3 main_holo.py --num_epochs 1 --output_dir {} {} &
        '''.format(output_dir, list_params).replace("\n", "")
        p = subprocess.Popen(cmd, shell=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        process.append(p)
        p.communicate()
        exit_codes = [p.communicate() for p in process]
        #print("exit_codes :", exit_codes)
    def launch_benchmark_testing(self):
        """
        This method launches the tests for the different configurations
        (just one for the moment).
        """
        process = []
        input_dir = self.getInputDir()
        save_test_dir = '.'
        list_params = self.get_params_string()
        cmd = '''
        python3 main_holo.py --test_mode --save_test_dir {} {} &
        '''.format(save_test_dir, list_params).replace("\n", "")
        print("Testing CMD : ", cmd)
        p = subprocess.Popen(cmd, shell=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
        process.append(p)
        p.communicate()
        exit_codes = [p.communicate() for p in process]
        #print("exit_codes :", exit_codes)
    def summaryRes(self):
        """
        This method makes a summary of the benchmark results.
        """
        report = Report(self)
        report.make_report()
        report.toCSV()
    def launch_benchmark(self):
        """
        This method launches the benchmark process that has been configured
        by the csv file.
        """
        self.getParam()
        os.makedirs(self.input_dir)
        print(self.toString())
        with open('{}/config_benchmark.txt'.format(self.input_dir), "w+") as f:
            print(self.toString(), file=f)

        timeData = time.time()
        self.launch_benchmark_data()
        print("Time elapsed configuring the data : ", time.time() - timeData)

        timeElapsed = time.time()
        self.launch_benchmark_training()
        print("Time elapsed training : ", time.time() - timeElapsed)

        timeElapsed = time.time()
        self.launch_benchmark_testing()
        print("Time elapsed testing : ", time.time() - timeElapsed)

        self.summaryRes()
if __name__ == '__main__':
    timeElapsed = time.time()
    benchmark = Benchmark('res_brut.csv')
    print("Number of tests : ", benchmark.getNbTest())
    benchmark.launch_benchmark()
    print("Time elapsed : ", time.time() - timeElapsed)
@@ -59,11 +59,11 @@ parser = argparse.ArgumentParser(description='')
 #parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
 #parser.add_argument('--train_image', dest='train_patterns', default=hparams.train_patterns, help='patterns of images for training')
 #parser.add_argument('--train_noise', dest='train_noise', default=hparams.train_noise, help='noise values for training images')
-parser.add_argument('--save_dir', dest='save_dir', default='./data1', help='dir of patches')
+#parser.add_argument('--save_dir', dest='save_dir', default='./data1', help='dir of patches')
 #parser.add_argument('--patch_size', dest='pat_size', type=int, default=hparams.patch_size, help='patch size')#50 for RGB and 70 for grayscale
 #parser.add_argument('--stride', dest='stride', type=int, default=hparams.stride, help='stride')
 #parser.add_argument('--step', dest='step', type=int, default=hparams.step, help='step')
-parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
+#parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
 # check output arguments
 #parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
 #parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
...
@@ -22,7 +22,7 @@ def save_clean_pred_rad(args, exp, clean_pred_rad, noisy, clean, nom_img = "Nois
         nom_img (str, optional) : The saving name for the result
     """
-    save_name = os.path.join(args.save_test_dir, os.path.basename(os.path.normpath(args.input_dir)))
+    save_name = os.path.join(args.save_test_dir, args.input_dir, "Test")
     if not os.path.exists(save_name):
         os.makedirs(save_name)
@@ -64,11 +64,13 @@ def evaluate_on_HOLODEEP(args, exp):
     """
     patterns = args.test_patterns
     noises = args.test_noises
     clean, noisy = from_DATABASE(args.eval_dir, noises, patterns, True)
     clean = np.array(clean)
     noisy = np.array(noisy)
@@ -80,6 +82,7 @@ def evaluate_on_HOLODEEP(args, exp):
         std = cal_std_phase(clean_pred_rad, clean[i])
         running_std += std
     print("On the patterns : ", patterns)
     print("With noise : ", noises)
     print("average_std : ", running_std/noisy.shape[0])
@@ -95,6 +98,7 @@ def evaluate_on_DATAEVAL(args, exp):
         exp (Experiment) : The model used to do the de-noising operation
     """
     dir_name = args.test_dir
     #nameList = ["DATA_1_Phase_Type1_2_0.25_1.5_4_50.mat", "DATA_20_Phase_Type4_2_0.25_2.5_4_100.mat", "VibPhaseDATA.mat"]
     nameList = get_files(pathlib.Path(dir_name), '.*.mat')
@@ -125,6 +129,7 @@ def denoise_img(args, noisy, clean, name, exp):
     clean_pred_rad = noisy
     nb_iteration = args.nb_iteration
     for j in range(nb_iteration):
         clean_pred_rad = denoising_single_image(args, clean_pred_rad, exp)
@@ -148,8 +153,21 @@ def denoising_single_image(args, noisy, exp):
     noisyPy_cos = torch.Tensor(normalize_data(noisyPy, 'cos', None))
     noisyPy_sin = torch.Tensor(normalize_data(noisyPy, 'sin', None))
-    clean_pred_cos = exp.test(noisyPy_cos).detach().cpu().numpy()
-    clean_pred_sin = exp.test(noisyPy_sin).detach().cpu().numpy()
+    #clean_pred_cos = exp.test(noisyPy_cos).detach().cpu().numpy()
+    #clean_pred_sin = exp.test(noisyPy_sin).detach().cpu().numpy()
+    clean_pred_cos = exp.test(noisyPy_cos)
+    clean_pred_sin = exp.test(noisyPy_sin)
+    clean_pred_cos = clean_pred_cos.detach()
+    clean_pred_sin = clean_pred_sin.detach()
+    clean_pred_cos = clean_pred_cos.cpu()
+    clean_pred_sin = clean_pred_sin.cpu()
+    clean_pred_cos = clean_pred_cos.numpy()
+    clean_pred_sin = clean_pred_sin.numpy()
     clean_pred_rad = np.angle(clean_pred_cos + clean_pred_sin * 1J)
@@ -173,7 +191,7 @@ def run(args):
-    exp = nt.Experiment(net, adam, statsManager, perform_validation_during_training=args.perform_validation, input_dir=args.input_dir, startEpoch=args.epoch, freq_save=args.freq_save)
+    exp = nt.Experiment(net, adam, statsManager, perform_validation_during_training=args.perform_validation, input_dir=args.input_dir, output_dir=args.output_dir, startEpoch=args.epoch, freq_save=args.freq_save)
     if not args.test_mode :
@@ -191,6 +209,9 @@ def run(args):
     else :
+        print("args.noisy_img : ", args.noisy_img)
         if args.noisy_img is None:
             evaluate_on_HOLODEEP(args, exp)
             evaluate_on_DATAEVAL(args, exp)
...
@@ -146,7 +146,7 @@ class Experiment(object):
     """
     def __init__(self, net, optimizer, stats_manager, startEpoch=None,
-                 input_dir=None, perform_validation_during_training=False, freq_save=1):
+                 input_dir=None, output_dir=None, perform_validation_during_training=False, freq_save=1):
         # Initialize history
         history = []
@@ -155,7 +155,12 @@ class Experiment(object):
         if input_dir is None:
             input_dir = './PyTorchCheckpoint/'
+        if output_dir is None:
-        output_dir = input_dir + '/experiment_{}'.format(datetime.datetime.now().strftime("%Y_%m_%d-%H:%M:%S"))
+            output_dir = input_dir + '/experiment_{}'.format(datetime.datetime.now().strftime("%Y_%m_%d-%H:%M:%S"))
+        else:
+            output_dir = input_dir + '{}'.format(output_dir)
         checkpoint_path = os.path.join(output_dir, "checkpoint.pth.tar")
         config_path = os.path.join(input_dir, "config.txt")
@@ -390,4 +395,3 @@ class Experiment(object):
         plt.xlabel("Nb Epoch")
         plt.ylabel("Nb Losses")
         plt.show()
...
Benchmark/benchmark_20_09_2021-19:50:01;;
∆𝜆;STD;PSNR
NumEpochs:;1;
BestEpoch;1;
avg;0.9043345;10.900126642273504
0;;
1;;
1,5;;
2;;
2,5;;
CT;10j;