Commit eb065843 authored by Mano Brabant

Second version of the benchmark tool

parent 512a4b09
@@ -12,6 +2,8 @@ import os
import fnmatch
from statistics import mean
import csv
import collections
import copy
from argument import *
@@ -23,21 +25,20 @@ os.environ['MKL_THREADING_LAYER'] = 'GNU'
import random
# Useful?
random.seed(10)
valid_args = vars(parse())
class Report(object):
"""
This class represent a raport for a benchmark
This class represents a report for a benchmark
"""
def __init__(self, benchmark):
"""
This ocnstructor create a new report for a benchmark
This constructor creates a new report for a benchmark
:param benchmark: The benchmark from which a report will be made
"""
@@ -48,11 +49,12 @@ class Report(object):
self.benchmark = benchmark
def make_report(self):
input_dir = self.benchmark.getInputDir() + "/Test/"
def make_report(self, num_config=0):
"""
This method takes the results produced by the tests of the different configurations and records them
"""
input_dir = self.benchmark.params['input_dir'] + "/Test/"
print(input_dir)
for root, dirs, files in os.walk(input_dir):
for file in fnmatch.filter(files, "*.res"):
@@ -64,35 +66,57 @@ class Report(object):
self.res_std.append(float(list(filter(lambda string : string.startswith('std'), lines.split('\n')))[0].split(':')[1]))
print(input_dir)
print("Average psnr : ", mean(self.res_psnr))
print("Average std : ", mean(self.res_std))
print("\n\n\n\n\n")
def toCSV(self, num_config=0):
"""
This method writes the results produced during the benchmark into the CSV file
"""
# Number of rows in the final list written to the CSV file
num_row = 100
def toCSV(self):
# Number of lines before the results section
offset = 0
if(len(self.res_psnr) == 0):
self.make_report()
mylist = [[]]
mydict = collections.defaultdict(lambda: collections.defaultdict(dict))
with open(self.benchmark.file_path, 'r', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
mylist = list(reader)
print(mylist)
print(mylist[0])
print(mylist[2])
for i in range(len(mylist)):
if(mylist[i][0] == "Results"):
offset = i
mydict[0][1+num_config] = self.benchmark.params['input_dir']
mydict[2+offset][1+num_config] = self.benchmark.params['num_epochs']
mydict[3+offset][1+num_config] = self.benchmark.params['num_epochs'] # Not yet able to pick the best-performing epoch (parse the training.txt file after running the training with the --perform_validation arg)
mydict[4+offset][1+num_config] = mean(self.res_std)
mydict[5+offset][1+num_config] = mean(self.res_psnr)
for i in range(len(self.res_std)):
mydict[6+offset+i][1+num_config] = self.res_std[i]
mydict[6+offset+i+len(self.res_std)][1+num_config] = self.res_psnr[i]
final_list = [[''] * (self.benchmark.getNbTest() + 1) for _ in range(num_row)] # one distinct row per line; multiplying the list would alias every row to the same object
for i in range(len(mylist)):
final_list[i] = mylist[i]
for k1 in mydict.keys():
for k2 in mydict[k1].keys():
final_list[k1][k2] = copy.deepcopy(mydict[k1][k2])
mylist[0][0] = self.benchmark.input_dir
mylist[2][1] = "1"
mylist[3][1] = "1"
mylist[4][1] = mean(self.res_std)
mylist[4][2] = mean(self.res_psnr)
with open(self.benchmark.file_path, 'w', newline='') as csvfile:
@@ -100,7 +124,7 @@ class Report(object):
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerows(mylist)
spamwriter.writerows(final_list)
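One detail worth flagging in the grid construction above: multiplying a list of lists copies references, not rows, so every row would alias the same object; that is why final_list is built with a comprehension. A quick stand-alone illustration:
rows = [[''] * 3] * 4                 # four references to ONE inner list
rows[0][1] = 'x'                      # ...so the write shows up in every row
rows = [[''] * 3 for _ in range(4)]   # four distinct inner lists
rows[0][1] = 'x'                      # now only the first row changes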
@@ -110,7 +134,7 @@ class Benchmark(object):
"""
def __init__(self, file_path):
def __init__(self, file_path, sbatch=False):
"""
This constructor creates a new benchmark for a CSV file
@@ -119,45 +143,71 @@ class Benchmark(object):
self.input_dir = 'Benchmark/benchmark_{}'.format(datetime.datetime.now().strftime("%d_%m_%Y-%H:%M:%S"))
self.valid_args = vars(parse())
self.nb_config = 0
self.file_path = file_path
self.params = dict()
self.sbatch = sbatch
self.getParam()
def getParam(self):
def getParam(self, num_config=0):
"""
This method gets the changing parameters from the CSV file
"""
for arg in valid_args:
if(isinstance(valid_args[arg], tuple)):
self.params[arg] = ' '.join([str(i) for i in valid_args[arg]])
# Get valid args
for arg in self.valid_args:
if(isinstance(self.valid_args[arg], tuple)):
self.params[arg] = ' '.join([str(i) for i in self.valid_args[arg]])
else:
self.params[arg] = valid_args[arg]
self.params[arg] = self.valid_args[arg]
# Get modified args
with open(self.file_path, 'r', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';', quotechar='|')
for row in spamreader:
if(len(row) >= 2 and row[0] in valid_args):
self.params[row[0]] = row[1]
if(len(row) - 1 > self.nb_config):
self.nb_config = len(row) - 1
if(len(row) > self.getNbTest() and row[0] in self.valid_args and row[1+num_config] != ''):
self.params[row[0]] = row[1+num_config]
self.params['input_dir'] = self.input_dir
self.params['input_dir'] = self.input_dir + "/Test_{}/".format(num_config)
self.params['save_test_dir'] = '.'
print(self.params)
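As an illustration (taken from the sample CSV further down in this commit), a row such as the one below gives configuration 0 nb_iteration=1 and configuration 1 nb_iteration=3; empty cells keep the defaults from argument.py:
nb_iteration;1;3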
def get_sbatch_config(self):
ret = """
#!/bin/bash
#SBATCH -p gpu
#SBATCH --gres gpu:1
"""
return ret
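With sbatch enabled, the cmd_train/cmd_test scripts written further down start with this header; a generated file would look roughly like the following (the python line is filled in later by launch_benchmark_training, parameters abbreviated):
#!/bin/bash
#SBATCH -p gpu
#SBATCH --gres gpu:1
python3 main_holo.py --output_dir <output_dir> <params> &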
def getNbTest(self):
"""
This method returns the number of test configurations launched by the benchmark
"""
return 1
return self.nb_config
def getInputDir(self):
"""
@@ -220,9 +270,7 @@ class Benchmark(object):
self.params['clean_eval'] = os.path.join(self.params['save_dir'], "img_clean_train_" + sess_name + ".npy")
self.params['noisy_eval'] = os.path.join(self.params['save_dir'], "img_noisy_train_" + sess_name + ".npy")
print(self.params)
#print("exit_codes :", exit_codes)
@@ -234,16 +282,41 @@ class Benchmark(object):
process = []
input_dir = self.getInputDir()
output_dir = '/'
list_params = self.get_params_string()
cmd = '''
python3 main_holo.py --num_epochs 1 --output_dir {} {} &
cmd_python = '''
python3 main_holo.py --output_dir {} {} &
'''.format(output_dir, list_params).replace("\n", "")
cmd_path = self.params['input_dir'] + "/cmd_train"
p = subprocess.Popen(cmd, shell=True,
with open(cmd_path , 'w') as cmd_file:
if(self.sbatch):
print(self.get_sbatch_config(), file=cmd_file)
print(cmd_python, file=cmd_file)
cmd_bash = '''
chmod 755 {};
./{} &
'''.format(cmd_path, cmd_path).replace("\n", "")
if(self.sbatch):
cmd_bash = '''
chmod 755 {};
sbatch {}
'''.format(cmd_path, cmd_path).replace("\n", "")
print("\n\n\n")
print("Training CMD : ", cmd_python)
print("\n\n\n")
p = subprocess.Popen(cmd_bash, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
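So, depending on the sbatch flag, the dispatch above reduces to one of two shell commands (<cmd_path> standing in for the generated script path):
chmod 755 <cmd_path>; ./<cmd_path> &      # local run in the background
chmod 755 <cmd_path>; sbatch <cmd_path>   # submission to Slurm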
@@ -254,8 +327,6 @@ class Benchmark(object):
exit_codes = [p.communicate() for p in process]
#print("exit_codes :", exit_codes)
def launch_benchmark_testing(self):
"""
@@ -265,19 +336,45 @@ class Benchmark(object):
process = []
input_dir = self.getInputDir()
save_test_dir = '.'
list_params = self.get_params_string()
cmd = '''
cmd_python = '''
python3 main_holo.py --test_mode --save_test_dir {} {} &
'''.format(save_test_dir, list_params).replace("\n", "")
print("Testing CMD : ", cmd)
cmd_path = self.params['input_dir'] + "/cmd_test"
p = subprocess.Popen(cmd, shell=True,
with open(cmd_path , 'w') as cmd_file:
if(self.sbatch):
print(self.get_sbatch_config(), file=cmd_file)
print(cmd_python, file=cmd_file)
cmd_bash = '''
chmod 755 {};
./{} &
'''.format(cmd_path, cmd_path).replace("\n", "")
if(self.sbatch):
cmd_bash = '''
chmod 755 {};
sbatch {}
'''.format(cmd_path, cmd_path).replace("\n", "")
print("\n\n\n")
print("Testing CMD : ", cmd_python)
print("\n\n\n")
p = subprocess.Popen(cmd_bash, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
@@ -287,17 +384,16 @@ class Benchmark(object):
exit_codes = [p.communicate() for p in process]
#print("exit_codes :", exit_codes)
def summaryRes(self):
def summaryRes(self, num_config=0):
"""
This method makes a summary for the benchmark
"""
report = Report(self)
report.make_report()
report.toCSV()
report.make_report(num_config)
report.toCSV(num_config)
@@ -307,40 +403,57 @@ class Benchmark(object):
that has been configured by the csv file
"""
self.getParam()
os.makedirs(self.input_dir)
for i in range(self.getNbTest()):
os.makedirs(self.input_dir)
self.getParam(i)
print(self.toString())
os.makedirs(self.params['input_dir'])
with open('{}/config_benchmark.txt'.format(self.input_dir), "w+") as f:
print(self.toString(), file=f)
with open('{}/config_benchmark.txt'.format(self.params['input_dir']), "w+") as f:
print(self.toString(), file=f)
timeData = time.time()
self.launch_benchmark_data()
print("Time elapsed configuring the data : ", time.time() - timeData)
timeElapsed = time.time()
self.launch_benchmark_data()
print("Time elapsed configuring the data : ", time.time() - timeElapsed)
timeElapsed = time.time()
self.launch_benchmark_training()
print("Time elapsed training : ", time.time() - timeElapsed)
timeElapsed = time.time()
self.launch_benchmark_training()
print("Time elapsed training : ", time.time() - timeElapsed)
timeElapsed = time.time()
self.launch_benchmark_testing()
print("Time elapsed testing : ", time.time() - timeElapsed)
timeElapsed = time.time()
self.launch_benchmark_testing()
print("Time elapsed testing : ", time.time() - timeElapsed)
self.summaryRes()
self.summaryRes(i)
if __name__ == '__main__':
file_name = 'res_brut.csv'
sbatch = False
try:
file_name = sys.argv[1]
sbatch = sys.argv[2] == "sbatch"
except Exception as e:
pass
# Remove the extra argv entries (to avoid errors with the ArgumentParser in argument.py)
sys.argv = sys.argv[:1]
print("file_name : ", file_name)
timeElapsed = time.time()
benchmark = Benchmark('res_brut.csv')
benchmark = Benchmark(file_name, sbatch)
print("Number of test : ", benchmark.getNbTest())
benchmark.launch_benchmark()
print("Time elapsed : ", time.time() - timeElapsed)
Benchmark/benchmark_20_09_2021-19:50:01;;
∆𝜆;STD;PSNR
NumEpochs:;1;
BestEpoch;1;
avg;0.9043345;10.900126642273504
0;;
1;;
1,5;;
2;;
2,5;;
CT;10j;
;;
train_noises;0;
train_patterns;1;
original_size;1024 1024;
patch_size;50;
stride;50;
step;0;
patch_per_image;4;
lr;0.0005;
num_epochs;1;
D;1;
nb batches ;60;
batch_size;128;
;;
nb_rotation;1;
eval_noises;0;
eval_patterns;1;
;;
test_noises;0;
test_patterns;1;
;Benchmark/benchmark_24_09_2021-16:56:58/Test_0/;Benchmark/benchmark_24_09_2021-16:56:58/Test_1/
;;
train_noises;0-1-1.5-2-2.5;0-1-1.5-2-2.5
train_patterns;1 2 3 4 5;1 2 3 4 5
original_size;1024 1024;1024 1024
patch_size;50;50
stride;50;50
step;0;0
patch_per_image;4;4
lr;0.0005;0.0005
num_epochs;1;1
D;1;1
nb batches ;60;60
batch_size;128;128
;;
nb_rotation;1;1
nb_iteration;1;3
;;
;;
test_noises;0;0
test_patterns;1;1
;;
;;
;;
;;
;;
;;
;;
;;
Results;;
∆𝜆;;
NumEpochs:;1;1
BestEpoch;1;1
avg;0.3726104;0.3707522
img1;18.301960232394684;17.89311946826126
img2;0.3726104;0.3707522
img3;18.301960232394684;17.89311946826126
;;
img1;;
img2;;
img3;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;;
;Benchmark/benchmark_21_09_2021-09:31:57
;
train_noises;0
train_patterns;1
original_size;1024 1024
patch_size;50
stride;50
step;0
patch_per_image;4
lr;0.0005
num_epochs;1
D;1
nb batches ;60
batch_size;128
;
nb_rotation;1
eval_noises;0
eval_patterns;1
;
test_noises;0
test_patterns;1
;
;
;
input_dir;
output_dir;
train_dir;
eval_dir;
;
;
test_dir;
save_test_dir;
train_patterns;
train_noises;
eval_patterns;
eval_noises;
;
input_dir;
output_dir;
train_dir;
eval_dir;
;
;
test_dir;
save_test_dir;
train_patterns;
train_noises;
eval_patterns;
eval_noises;
;
;
lr;
train_image_size;
eval_image_size;
test_image_size;
image_mode;
batch_size;
epoch;
test_mode;
;
;
;
nb_iteration;
nb_rotation;
patch_size;
stride;
step;
phase_type;
patch_per_image;
;
perform_validation;
originalsize;
;
;
Results;
∆𝜆;STD/PSNR
NumEpochs:;1
BestEpoch;1
avg;1.0152992
img1;1
img2;2
img3;3
;
img1;2
img2;3
img3;4