Commit 17f01bda authored by Touklakos

Port to the PyTorch library. The old TensorFlow components have been saved as fileName_tsf.py
parent 916aa980
import argparse
def parse():
'''
    Parse the command-line arguments.
'''
parser = argparse.ArgumentParser(
description='DnCNN')
parser.add_argument('--input_dir', type=str, default='./PyTorchCheckpoint/', help='directory of saved checkpoints for denoising operation or retraining')
    parser.add_argument('--clean_train', type=str, default='data1/img_clean_train_1-2-3-4-5_0_two_50_50_9.npy', help='filepath of the noise-free file for training')
    parser.add_argument('--noisy_train', type=str, default='data1/img_noisy_train_1-2-3-4-5_0_two_50_50_9.npy', help='filepath of the noisy file for training')
    parser.add_argument('--clean_eval', type=str, default='data1/img_clean_train_1-2-3-4-5_0_two_50_50_9.npy', help='filepath of the noise-free file for evaluation')
    parser.add_argument('--noisy_eval', type=str, default='data1/img_noisy_train_1-2-3-4-5_0_two_50_50_9.npy', help='filepath of the noisy file for evaluation')
parser.add_argument('--num_epochs', type=int, default=200, help='number of epochs to train')
    parser.add_argument('--D', type=int, default=4, help='number of dilated convolutional layers')
    parser.add_argument('--C', type=int, default=64, help='number of channels in each convolutional layer')
parser.add_argument('--plot', action='store_true', help='plot loss during training')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate for training')
parser.add_argument('--image_size', type=int, nargs='+', default=(50, 50), help='size of training images')
parser.add_argument('--batch_size', type=int, default=384)
    parser.add_argument('--epoch', type=int, default=None, help='epoch number from which to resume training')
    parser.add_argument('--test_mode', action='store_true', help='testing phase')
    parser.add_argument('--image_mode', type=int, default=1, help='1 or 3 (black&white or RGB)')
    parser.add_argument('--tsf', action='store_true', help='use the TensorFlow version of the code')
    parser.add_argument('--graph', action='store_true', help='display the loss graph')
    parser.add_argument('--graph_fin', action='store_true', help='display the loss graph during training')
# Tensorflow arguments
parser.add_argument('--use_gpu', dest='use_gpu', type=int, default=1, help='gpu flag, 1 for GPU and 0 for CPU')
parser.add_argument('--checkpoint_dir', dest='ckpt_dir', default='./checkpoint', help='models are saved here')
    parser.add_argument('--sample_dir', dest='sample_dir', default='./sample', help='samples are saved here')
    parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test samples are saved here')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
parser.add_argument('--test_noisy_img', dest='noisy_img', type=str, help='name and directory of the noisy image for testing')
parser.add_argument('--test_noisy_key', dest='noisy_key', type=str, help='name of the key for noisy matlab image for testing')
parser.add_argument('--test_clean_img', dest='clean_img', type=str, help='name and directory of the clean image for testing (eventually none)')
parser.add_argument('--test_clean_key', dest='clean_key', type=str, help='name of the key for clean matlab image for testing (eventually none)')
    # note: a plain type=bool would parse the string 'False' as True, so convert explicitly
    parser.add_argument('--test_flip', dest='flip', type=lambda s: s.lower() in ('true', '1'), default=False, help='option to flip the noisy (and clean) test image upside down')
parser.add_argument('--test_ckpt_index', dest='ckpt_index', type=str, default='', help='name and directory of the checkpoint that will be restored.')
parser.add_argument('--save_dir', dest='save_dir', default='./data1/', help='dir of patches')
parser.add_argument('--exp_file', dest='exp_file', help='experiment file')
return parser.parse_args()
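# Example invocation (illustrative values, consistent with the flags above):
#   python main_holo.py --num_epochs 200 --lr 1e-3 --batch_size 384 --input_dir ./PyTorchCheckpoint/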
class Args():
'''
    For Jupyter notebooks.
    Not up to date.
'''
def __init__(self):
self.root_dir = '../dataset/BSDS300/images'
self.output_dir = '../checkpoints/'
self.noisy = 'data1/img_noisy_train_1-2-3-4-5_0_two_50_50_9.npy'
self.clean = 'data1/img_clean_train_1-2-3-4-5_0_two_50_50_9.npy'
self.num_epochs = 200
self.D = 4
self.C = 64
self.plot = False
self.model = 'dudncnn'
self.lr = 1e-3
self.image_size = (180, 180)
self.test_image_size = (320, 320)
self.batch_size = 60
self.sigma = 30
self.is_training = False
self.image_mode = 1
self.graph = False
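# In a notebook, Args() stands in for parse() (sketch; as noted above, the fields may be stale):
#   args = Args()
#   args.num_epochs = 10  # override any field directly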
import os
import torch
import torch.utils.data as td
import torchvision as tv
import numpy as np
from PIL import Image
from utils import *
from argument import *
class NoisyBSDSDataset(td.Dataset):
""" This class allow us to load and use data needed for model training
"""
def __init__(self, clean, noisy, image_mode, image_size):
""" Initialize the data loader
Arguments:
clean(String) : The path of clean data
noisy(String) : The path of noisy data
image_mode(int) : The number of channel of the clean and noisy data
image_size((int, int)) : The size (in pixels) of clean and noisy data
"""
super(NoisyBSDSDataset, self).__init__()
self.image_mode = image_mode
self.image_size = image_size
self.training_name = noisy
print("clean : ", clean)
print("noisy : ", noisy)
self.clean, self.noisy = load_train_data(filepath=clean, noisyfilepath=noisy, phase_type="two")
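        # The same random draw is passed to both normalize_data calls so that each
        # clean/noisy pair receives an identical 'two'-phase normalization.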
rdm = np.random.randint(0, 2, self.clean.shape[0])
self.clean = normalize_data(self.clean, 'two', rdm, True)
self.noisy = normalize_data(self.noisy, 'two', rdm, True)
def __len__(self):
return len(self.clean)
def __repr__(self):
return "NoisyBSDSDataset(image_mode={}, image_size={})". \
format(self.image_mode, self.image_size)
def getTrainingName(self):
return self.training_name
def __getitem__(self, idx):
cleanSample = self.clean[idx]
noisySample = self.noisy[idx]
cleanSample = cleanSample.reshape(self.image_mode, self.image_size[0], self.image_size[1])
noisySample = noisySample.reshape(self.image_mode, self.image_size[0], self.image_size[1])
cleanSample = torch.Tensor(cleanSample)
noisySample = torch.Tensor(noisySample)
return noisySample, cleanSample
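# Minimal usage sketch (paths are the defaults from argument.py; batch size illustrative):
#   dataset = NoisyBSDSDataset(clean='data1/img_clean_train_1-2-3-4-5_0_two_50_50_9.npy',
#                              noisy='data1/img_noisy_train_1-2-3-4-5_0_two_50_50_9.npy',
#                              image_mode=1, image_size=(50, 50))
#   loader = td.DataLoader(dataset, batch_size=384, shuffle=True)
#   noisy_batch, clean_batch = next(iter(loader))  # tensors of shape (384, 1, 50, 50)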
import matplotlib
matplotlib.use('TkAgg')  # select the Tk backend before pyplot is imported
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg  # , NavigationToolbar2TkAgg
import multiprocessing
import queue
import time
import random
from tkinter import *
import torch
from utils import *
import nntools as nt
from model import *
from data import *
from argument import *
from main_holo_tsf import *
import numpy as np
import datetime
#Create a window
window=Tk()
# main() needs command-line arguments, e.g.:
# python main_test.py --input_dir ./PyTorchCheckpoint/
def main():
    args = parse()
    max_epochs = args.num_epochs
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    checkpoint = None
    print("[*] Loading data...")
    # Wait until the training process has written its first checkpoint.
    while checkpoint is None:
        try:
            if args.exp_file:
                # e.g. './PyTorchCheckpoint/experiment_08_02_2021-18:21:44'
                exp_file = args.exp_file
            else:
                # Derive the experiment directory name from the current timestamp.
                exp_file = os.path.join(args.input_dir, 'experiment_{}'.format(datetime.datetime.now().strftime("%d_%m_%Y-%H:%M:%S")))
            checkpoint = torch.load(os.path.join(exp_file, "checkpoint_{:0>5}.pth.tar".format(0)), map_location=device)
        except (FileNotFoundError, OSError):
            time.sleep(0.2)  # avoid busy-waiting while the first checkpoint is being written
    print("[*] Load successful...")
# ===================================
#Create a queue to share data between process
q = multiprocessing.Queue()
# time.sleep(1)
#Create and start the simulation process
simulate=multiprocessing.Process(None,simulation,args=(q,exp_file,max_epochs))
simulate.start()
#Create the base plot
plot()
#Call a function to update the plot when there is new data
updateplot(q,exp_file,max_epochs)
window.mainloop()
print ('Done')
# ===================================
def plot():
    # Create the base plot. The line, axes and canvas are made global so that updateplot can refresh them later.
    global line, ax, canvas
fig = matplotlib.figure.Figure()
ax = fig.add_subplot(1,1,1,title="Evaluation losses Graph",xlabel="Epochs Number",ylabel="Losses")
canvas = FigureCanvasTkAgg(fig, master=window)
# ax.xlabel("Epochs")
# plt.ylabel("losses")
window.wm_title("Evaluation losses Graph")
canvas.draw()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
def update_checkpoint(exp_file, max_epoch):
    """Load the most recent checkpoint, scanning down from max_epoch until one exists."""
    checkpoint = None
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    while checkpoint is None and max_epoch >= 0:
        try:
            checkpoint = torch.load(os.path.join(exp_file, "checkpoint_{:0>5}.pth.tar".format(max_epoch)), map_location=device)
        except (FileNotFoundError, OSError):
            max_epoch -= 1  # fall back to the previous epoch's checkpoint
    return checkpoint
def updateplot(q, ex_file, max_epochs):
    try:  # Check whether the simulation process has posted new data
        result = q.get_nowait()
    except queue.Empty:
        print("empty")
        window.after(200, updateplot, q, ex_file, max_epochs)
        return
    if result != 'Q':
        checkpoint = update_checkpoint(ex_file, max_epochs)
        history = checkpoint['History']
        loss_tab_val = []
        loss_tab_eval = []
        for train_metrics, eval_metrics in history:
            loss_tab_val.append(round(train_metrics['loss'], 6))
            loss_tab_eval.append(round(eval_metrics['loss'], 6))
        epochs = np.arange(len(loss_tab_val))
        line, = ax.plot(epochs, loss_tab_val, 'r--')
        line, = ax.plot(epochs, loss_tab_eval, 'b')
        ax.draw_artist(line)
        canvas.draw()
        window.after(200, updateplot, q, ex_file, max_epochs)
    else:
        print('done')
def simulation(q, ex_file, max_epochs):
    checkpoint = update_checkpoint(ex_file, max_epochs)
    history = checkpoint['History']
    loss_tab = []
    for train_metrics, eval_metrics in history:
        loss_tab.append(round(train_metrics['loss'], 6))
    q.put(loss_tab)
    while True:
        time.sleep(0.2)
        q.put("not Q")
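# Queue protocol between the two processes: simulation() posts the loss history once, then
# periodic "not Q" ticks; a literal 'Q' would tell updateplot() to stop scheduling redraws.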
if __name__ == '__main__':
main()
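# Typical use (assumes a training run is writing checkpoints under --input_dir in parallel):
#   python main_test.py --input_dir ./PyTorchCheckpoint/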
@@ -36,9 +36,9 @@ __status__ = "Production"
# Default hyperparameters:
hparams = tf.contrib.training.HParams(
#to train on HOLODEEP tiff images
-noise_src_dir = '/lium/raid01_c/tahon/holography/HOLODEEP/',
-clean_src_dir = '/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP/',
-eval_dir = '/lium/raid01_c/tahon/holography/HOLODEEPmat/',
+noise_src_dir = '/info/etu/m1/s160128/Documents/M1/DnCnn/Portage-reseau-de-neurones-de-Keras-vers-PyTorch/dncnn-tensorflow-holography-master/Holography/DATABASE/',
+clean_src_dir = '/info/etu/m1/s160128/Documents/M1/DnCnn/Portage-reseau-de-neurones-de-Keras-vers-PyTorch/dncnn-tensorflow-holography-master/Holography/DATABASE/',
+eval_dir = '/info/etu/m1/s160128/Documents/M1/DnCnn/Portage-reseau-de-neurones-de-Keras-vers-PyTorch/dncnn-tensorflow-holography-master/Holography/DATABASE/',
#to train on matlab images
#eval_dir = '/lium/raid01_c/tahon/holography/HOLODEEPmat/',
#to train on natural images
@@ -46,14 +46,14 @@ hparams = tf.contrib.training.HParams(
#clean_src_dir = '/lium/raid01_c/tahon/holography/NATURAL/original',
#eval_dir = '/lium/raid01_c/tahon/holography/HOLODEEPmat/',
#test_dir = 'lium/raid01_c/tahon/holography/TEST/',
-phase = 'test', #train or test phase
+phase = 'train', #train or test phase
#image
isDebug = False, #True, # create only 10 patches
originalsize = (1024,1024), #1024 for matlab database, 128 for holodeep database, 180 for natural images
phase_type = 'two', #keep phase between -pi and pi (phi), convert to cosine (cos) or sine (sin)
#select images for training
train_patterns = [1, 2, 3, 4, 5], #number of images from 1 to 5
-train_noise = '0', #[0, 1, 1.5, 2, 2.5],
+train_noise = '0-1-1.5-2-2.5', #[0, 1, 1.5, 2, 2.5],
#select images for evaluation (during training)
eval_patterns = [1, 2, 3, 4, 5],
eval_noise = '0-1-1.5-2-2.5',
@@ -67,15 +67,17 @@ hparams = tf.contrib.training.HParams(
#Training
nb_layers = 4,#original number is 16
batch_size = 128,#128
-patch_per_image = 384, # 9 for 180*180 images (NATURAL); Silvio used 384 for 1024*1024 images (MATLAB)
+patch_per_image = 384, #384, # 9 for 180*180 images (NATURAL); Silvio used 384 for 1024*1024 images (MATLAB)
patch_size = 50, # Silvio used 50.
-epoch = 350,#2000
+epoch = 10,#2000
lr = 0.0005, # learning rate
stride = 50, # spatial step for cropping images (value in the initial script: 10)
step = 0, # initial spatial step for cropping
-scales = [1] #[1, 0.9, 0.8, 0.7] # scale for data augmentation
+scales = [1], #[1, 0.9, 0.8, 0.7] # scale for data augmentation
+chosenIteration = '' # chosen iteration to load for training or testing
)
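# Any of the hyperparameters above can be overridden at run time through the --params flag,
# which is forwarded to hparams.parse(); e.g. (illustrative values):
#   hparams.parse("phase=train, epoch=10")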
def hparams_debug_string():
values = hparams.values()
hp = [' %s: %s' % (name, values[name]) for name in sorted(values)]
......
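# Test a trained checkpoint on a single Matlab image pair (paths assume the repository layout):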
rep=$(pwd)
python main_holo.py --test_noisy_img $rep/Holography/DATABASE/PATTERN1/MFH_0/NoisyPhase.mat --test_noisy_key 'NoisyPhase' --test_clean_img $rep/Holography/DATABASE/PATTERN1/PhaseDATA.mat --test_clean_key 'Phase' --test_flip False --params "phase=test" --test_ckpt_index $rep/holography/checkpoints:run-test2021-01-13_09\:57:\27.958861/
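# Launch TensorFlow training from checkpoint directory $1 at iteration $2: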
rep=$(pwd)
python main_holo_tsf.py --checkpoint_dir $rep/holography/checkpoints/$1 --sample_dir $rep/holography/eval_samples/ --params "chosenIteration=${2}, phase=train" --save_dir "./data1/"
# -*- coding: utf-8 -*-
#
# This file is part of DnCnn4Holo.
#
# DnCnn4Holo is a python script for phase image denoising.
# Home page: https://git-lium.univ-lemans.fr/tahon/dncnn-tensorflow-holography
#
# Adapted from https://github.com/wbhu/DnCNN-tensorflow by Hu Wenbo
#
# DnCnn4Holo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# DnCnn4Holo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DnCnn4Holo. If not, see <http://www.gnu.org/licenses/>.
"""
Copyright 2019-2020 Marie Tahon
:mod:`main_holo` train and test dncnn model on holography phase images
"""
import argparse
from glob import glob
import tensorflow as tf
from hparams import hparams, hparams_debug_string
from model import denoiser
import torch
import numpy as np
import time
from utils import *
import nntools as nt
from model import *
from data import *
from argument import *
#from main_holo_tsf import test_main
__license__ = "LGPL"
__author__ = "Marie Tahon"
__copyright__ = "Copyright 2019-2020 Marie Tahon"
__maintainer__ = "Marie Tahon"
__email__ = "marie.tahon@univ-lemans.fr"
__status__ = "Production"
#__docformat__ = 'reStructuredText'
#import ipdb
parser = argparse.ArgumentParser(description='')
parser.add_argument('--use_gpu', dest='use_gpu', type=int, default=1, help='gpu flag, 1 for GPU and 0 for CPU')
parser.add_argument('--checkpoint_dir', dest='ckpt_dir', default='./checkpoint', help='models are saved here')
parser.add_argument('--sample_dir', dest='sample_dir', default='./sample', help='samples are saved here')
parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test samples are saved here')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
parser.add_argument('--test_noisy_img', dest='noisy_img', type=str, default='', help='name and directory of the noisy image for testing')
parser.add_argument('--test_noisy_key', dest='noisy_key', type=str, default='', help='name of the key for noisy matlab image for testing')
parser.add_argument('--test_clean_img', dest='clean_img', type=str, default='', help='name and directory of the clean image for testing (eventually none)')
parser.add_argument('--test_clean_key', dest='clean_key', type=str, default='', help='name of the key for clean matlab image for testing (eventually none)')
# note: a plain type=bool would parse the string 'False' as True, so convert explicitly
parser.add_argument('--test_flip', dest='flip', type=lambda s: s.lower() in ('true', '1'), default=False, help='option to flip the noisy (and clean) test image upside down')
parser.add_argument('--test_ckpt_index', dest='ckpt_index', type=str, default='', help='name and directory of the checkpoint that will be restored.')
parser.add_argument('--save_dir', dest='save_dir', default='./data1/', help='dir of patches')
args = parser.parse_args()
#ipdb.set_trace()
hparams.parse(args.params)
#hparams.train_noise = hparams.train_noise.split('-')
def denoiser_train(denoiser, lr):
    # with load_data the images are already normalized by 255.0
sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type, hparams.stride, hparams.patch_size, hparams.patch_per_image)
#for training with natural images
#sess_name = 'natural_phi'
print('session name: ', sess_name)
train_data= load_train_data(filepath=args.save_dir + 'img_clean_train_' + sess_name + '.npy', noisyfilepath=args.save_dir + 'img_noisy_train_' + sess_name + '.npy', phase_type=hparams.phase_type)
    # if memory is limited, comment this line out and uncomment line 99 in model.py
#eval_files = glob('./data/test/{}/*.png'.format(args.eval_set))
#load eval data from HOLODEEP database
#eval_data = from_HOLODEEP(hparams.noise_src_dir, hparams.clean_src_dir, hparams.eval_noise, hparams.eval_patterns, hparams.isDebug)
#load eval data from DATABASE Matlab
eval_data = load_eval_data(hparams.eval_dir, hparams.eval_noise, hparams.eval_patterns)
print('train data shape:', train_data[0].shape, type(train_data))
print('eval data shape:', eval_data[0][0].shape, type(eval_data))
denoiser.train(train_data, eval_data, batch_size=hparams.batch_size, ckpt_dir=args.ckpt_dir, epoch=hparams.epoch, lr=lr, sample_dir=args.sample_dir, phase_type=hparams.phase_type, nb_layers=hparams.nb_layers)
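# Note: sess_name presumably follows the pattern patterns_noise_phaseType_stride_patchSize_patchPerImage
# (e.g. '1-2-3-4-5_0_two_50_50_9'), which matches the default .npy filenames in argument.py.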
def denoiser_test(denoiser):
#key = 'NoisyPhase' and 'Phase' for HOLODEEP
#key = 'Phaseb' and 'Phase' for DATA_1, DATA_20 and VibMap
    print(args.noisy_key, args.clean_key)
    noisy = load_test_data(args.noisy_img, key=args.noisy_key, flipupdown=args.flip) # for DATA_1, DATA_20 and VibPhase
print('load noisy ref')
if args.clean_img:
print('load clean ref')
clean = load_test_data(args.clean_img, key = args.clean_key, flipupdown = args.flip)
else:
clean = noisy
    test_files = (clean, noisy)
    test_dir, test_name = os.path.split(args.noisy_img)
    sess_name = args.ckpt_index.split('/')[-2]
    print(sess_name)
    save_dir = test_dir + '/' + sess_name + '/'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    denoiser.test(test_files, ckpt_dir=args.ckpt_index, save_dir=save_dir, save_name=test_name, phase_type=hparams.phase_type)
def main(_):
    print(hparams_debug_string())
    if not os.path.exists(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)
    lr = hparams.lr * np.ones([hparams.epoch])
    lr[30:] = lr[0] / 10.0  # divide the learning rate by 10 after epoch 30
    if args.use_gpu:
        # added to control the gpu memory
        print("GPU\n")
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            model = denoiser(sess, hparams.noise_type, sigma=hparams.sigma)
            if hparams.phase == 'train':
                denoiser_train(model, lr=lr)
            elif hparams.phase == 'test':
                denoiser_test(model)
            else:
                print('[!] Unknown phase')
                exit(0)
def save_clean_pred_rad(args, exp, clean_pred_rad, noisy, clean, nom_img = "NoisyPhase"):
"""This method is used to save the result of a de-noising operation
Arguments:
        args (ArgumentParser) : The settings used to perform and save the de-noising operation
exp (Experiment) : The de-noising model
clean_pred_rad (numpy.array) : The de-noised image
        noisy (numpy.array) : The noisy image
clean (numpy.array) : The noise free image
nom_img (str, optional) : The saving name for the result
"""
save_name = os.path.join("TestImages", os.path.basename(os.path.normpath(args.input_dir)))
if not os.path.exists(save_name):
os.makedirs(save_name)
save_images(os.path.join(save_name , '%s-noisy.tiff' % (nom_img)), noisy)
save_images(os.path.join(save_name , '%s-clean.tiff' % (nom_img)), clean)
save_images(os.path.join(save_name , '%s-%d.tiff' % (nom_img, exp.epoch)), clean_pred_rad)
save_MAT_images(os.path.join(save_name , '%s-%d.mat' % (nom_img, exp.epoch)), clean_pred_rad)
epoch = exp.epoch
psnr = cal_psnr(rad_to_flat(clean_pred_rad), rad_to_flat(clean))
std = cal_std_phase(clean_pred_rad, clean)
print("\n\n")
print("epoch : ", epoch)
print("psnr : ", psnr)
print("std : ", std)
print("\n\n")
with open(os.path.join(save_name , '%s-%d.res' % (nom_img, exp.epoch)), 'w') as f:
print("epoch : ", epoch, file=f)
print("psnr : ", psnr, file=f)
print("std : ", std, file=f)
def evaluate_on_HOLODEEP(args, exp):
"""This method is used to run an evaluation on the training database
Arguments:
        args (ArgumentParser) : The settings used to perform and save the de-noising operations
exp (Experiment) : The de-noising model
"""
clean, noisy = from_DATABASE(os.path.join("Holography", "DATABASE/"), "0-1-1.5-2-2.5", [1,2,3,4,5], False)
clean = np.array(clean)
noisy = np.array(noisy)
running_std = 0
for i in range(noisy.shape[0]):
print("\n\n", i)
clean_pred_rad = denoise_img(args, noisy[i], clean[i], "test-{:0>2}".format(i), exp)
std = cal_std_phase(clean_pred_rad, clean[i])
running_std += std
print("average_std : ", running_std/noisy.shape[0])
def evaluate_on_DATAEVAL(args, exp):
    """This method is used to run an evaluation on the three test images
    Arguments:
        args (ArgumentParser) : The settings used to perform and save the de-noising operations
        exp (Experiment) : The model used to do the de-noising operation
    """
dir_name = os.path.join("Holography", "DATAEVAL", "DATAEVAL")
nameList = ["DATA_1_Phase_Type1_2_0.25_1.5_4_50.mat", "DATA_20_Phase_Type4_2_0.25_2.5_4_100.mat", "VibPhaseDATA.mat"]
dataList = []
for name in nameList:
dataList.append(( load_test_data(os.path.join(dir_name, name), key = "Phaseb", flipupdown = True),
load_test_data(os.path.join(dir_name, name), key = "Phase", flipupdown = True)))
for idx, (noisy, clean) in enumerate(dataList):
denoise_img(args, noisy, clean, os.path.basename(nameList[idx]), exp)
def denoise_img(args, noisy, clean, name, exp, nb_iteration=3):
"""This method is used to do and save a de-noising operation on a given image
Arguments: