Commit fd2c39b9 authored by Touklakos

Remove Keras (generate without hparams), Add rotation, comment utils.py, eval on 1024x1024 images

parent 96d1f8f8
......@@ -115,61 +115,3 @@ Epochs | lr | train noise | nb batches | layers | duration | 0 | 1 |
- [x] check final evaluation phase
- [ ] check loading checkpoint (but not latest) from a previous model
- [ ] move to PyTorch
## Manual
You have to manually download the holography database and put it in the project directory.
The database can be found on skinner at /info/etu/m1/s171085/Projets/Portage-Keras-PyTorch/Portage-reseau-de-neurones-de-Keras-vers-PyTorch/dncnn-tensorflow-holography-master/Holography (a new link will be available later).
The application is run with the main_holo.py script, using the arguments defined in argument.py.
To start a training with the default parameters, use the command:
```
#launch a training
python3 main_holo.py
```
You can specify the training and eval data with the arguments noisy_train, clean_train, noisy_eval and clean_eval.
The usable data are generated with the generate_patches_holo.py and generate_patches_holo_fromMAT.py scripts and saved in a directory named "data1"; a generation example is shown after the next block.
```
#launch a training with the following data
python3 main_holo.py --noisy_train data1/img_noisy_train_1-2-3-4-5_0-1-1.5-2-2.5_two_50_50_384.npy --clean_train data1/img_clean_train_1-2-3-4-5_0-1-1.5-2-2.5_two_50_50_384.npy --noisy_eval data1/img_noisy_train_1-2-3-4-5_0-1-1.5-2-2.5_two_50_50_384.npy --clean_eval data1/img_clean_train_1-2-3-4-5_0-1-1.5-2-2.5_two_50_50_384.npy
```
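For reference, here is a hypothetical invocation of the patch-generation script. The argument names come from argument.py and the values shown are its defaults; the exact script interface may differ:
```
#generate training patches from the database (a sketch; values are the argument.py defaults)
python3 generate_patches_holo_fromMAT.py --train_dir ./Holography/DATABASE/ --save_dir ./data1/ --patch_size 50 --stride 50 --patch_per_image 384
```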
You can also specify the different hyperparameters for the training:
num_epochs is the number of epochs the model will train for,
D is the number of residual blocks,
C is the kernel size of the convolutional layers (not tested).
```
#launch a training with the following params
python3 main_holo.py --num_epochs 500 --D 16 --C 32
```
For data adaptation you have to give the size and the number of channels of the images you will be using, via the arguments train_image_size, eval_image_size and test_image_size (for training, evaluation or testing) and image_mode.
```
#launch a training in which the training images will be 50 by 50 in black and white
python3 main_holo.py --train_image_size 50 50 --image_mode 1
```
The arguments input_dir and epoch are used for re-training and de-noising operations.
In input_dir, give the path to the model you want to use; in epoch, give the epoch number from which you want to re-train or run a de-noising operation.
The models are saved in a directory named "PyTorchExperiments".
```
#relaunch a training starting from the model experiment_xxx at epoch 130
python3 main_holo.py --input_dir PyTorchExperiments/experiment_xxx --epoch 130
```
To run a de-noising operation, use the test_mode argument.
You can use the arguments test_noisy_img, test_noisy_key, test_clean_img and test_clean_key to specify which image you want to de-noise.
```
#launch a denoising operation on the image DATA_1_Phase_Type1_2_0.25_1.5_4_50.mat with the model experiment_xxx at epoch 130
python3 main_holo.py --test_mode --test_noisy_img Holography/DATAEVAL/DATAEVAL/DATA_1_Phase_Type1_2_0.25_1.5_4_50.mat --test_noisy_key 'Phaseb' --test_clean_img Holography/DATAEVAL/DATAEVAL/DATA_1_Phase_Type1_2_0.25_1.5_4_50.mat --test_clean_key 'Phase' --input_dir PyTorchExperiments/experiment_xxx --epoch 130
```
If you do not give an image to de-noise, an evaluation of the entire training and testing databases will start.
```
#launch a denoising operation on the 25 images of the Holography/DATABASE and Holography/DATAEVAL/DATAEVAL databases with the model experiment_xxx at epoch 130
python3 main_holo.py --test_mode --input_dir PyTorchExperiments/experiment_xxx --epoch 130
```
The results of those de-noising operations can be found in the TestImages directory.
......@@ -8,46 +8,89 @@ def parse():
parser = argparse.ArgumentParser(
description='DnCNN')
# #name for user #name for program #type #default values #explanation sentences
parser.add_argument('--input_dir', dest='input_dir', type=str, default='./PyTorchCheckpoint/', help='directory of saved checkpoints for denoising operation or retraining')
parser.add_argument('--train_dir', dest='train_dir', type=str, default='./Holography/DATABASE/', help='directory of training database')
parser.add_argument('--eval_dir', dest='eval_dir', type=str, default='./Holography/DATABASE/', help='directory of eval database')
parser.add_argument('--test_dir', dest='test_dir', type=str, default='./Holography/DATAEVAL/DATAEVAL/', help='directory of testing database')
parser.add_argument('--save_test_dir', dest='save_test_dir', type=str, default='./TestImages/', help='directory where results of de-noising operation will be saved')
parser.add_argument('--train_patterns', dest='train_patterns', type=int, nargs='+', default=(1, 2, 3, 4, 5), help='patterns used for training')
parser.add_argument('--train_noises', dest='train_noises', type=str, default="0-1-1.5-2-2.5", help='noise levels used for training ')
parser.add_argument('--eval_patterns', dest='eval_patterns', type=int, nargs='+', default=(1, 2, 3, 4, 5), help='patterns used for eval')
parser.add_argument('--eval_noises', dest='eval_noises', type=str, default="0-1-1.5-2-2.5", help='noise levels used for eval ')
parser.add_argument('--test_patterns', dest='test_patterns', type=int, nargs='+', default=(1, 2, 3, 4, 5), help='patterns used for testing')
parser.add_argument('--test_noises', dest='test_noises', type=str, default="0-1-1.5-2-2.5", help='noise levels used for testing ')
parser.add_argument('--clean_train', dest='clean_train', type=str, default='data1/img_clean_train_1-2-3-4-5_0-1-1.5-2-2.5_two_50_50_384.npy', help='filepath of noise free file for training')
parser.add_argument('--noisy_train', dest='noisy_train', type=str, default='data1/img_noisy_train_1-2-3-4-5_0-1-1.5-2-2.5_two_50_50_384.npy', help='filepath of noisy file for training')
parser.add_argument('--clean_eval', dest='clean_eval', type=str, default='data1/img_clean_train_1-2-3_0-1-1.5two.npy', help='filepath of noise free file for eval')
parser.add_argument('--noisy_eval', dest='noisy_eval', type=str, default='data1/img_noisy_train_1-2-3_0-1-1.5two.npy', help='filepath of noisy file for eval')
parser.add_argument('--num_epochs', dest='num_epochs', type=int, default=200, help='number of epochs to train')
parser.add_argument('--D', dest='D', type=int, default=4, help='number of dilated convolutional layers (residual blocks)')
parser.add_argument('--C', dest='C', type=int, default=64, help='kernel size of convolutional layer')
parser.add_argument('--plot', dest='plot', action='store_true', help='plot loss during training')
parser.add_argument('--lr', dest='lr', type=float, default=1e-3, help='learning rate for training')
parser.add_argument('--train_image_size', dest='train_image_size', type=int, nargs='+', default=(50, 50), help='size of train images')
parser.add_argument('--eval_image_size', dest='eval_image_size', type=int, nargs='+', default=(1024, 1024), help='size of eval images')
parser.add_argument('--test_image_size', dest='test_image_size', type=int, nargs='+', default=(1024, 1024), help='size of test images')
parser.add_argument('--image_mode', dest='image_mode', type=int, default=1, help='1 or 3 (black&white or RGB)')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=384, help='batch size for training')
parser.add_argument('--epoch', dest='epoch', type=int, default=None, help='epoch number from which to resume training')
parser.add_argument('--test_mode', dest='test_mode', action='store_true', help='testing phase')
parser.add_argument('--tsf', dest='tsf', action='store_true', help='run the TensorFlow version of the code')
parser.add_argument('--graph', dest='graph', action='store_true', help='display the graph')
parser.add_argument('--graph_fin', dest='graph_fin', action='store_true', help='display the graph during training')
# Tensorflow arguments
parser.add_argument('--use_gpu', dest='use_gpu', type=int, default=1, help='gpu flag, 1 for GPU and 0 for CPU')
parser.add_argument('--checkpoint_dir', dest='ckpt_dir', type=str, default='./checkpoint', help='models are saved here')
parser.add_argument('--sample_dir', dest='sample_dir', type=str, default='./sample', help='sample are saved here')
#parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test sample are saved here')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
parser.add_argument('--test_noisy_img', dest='noisy_img', type=str, help='path of the noisy image for testing')
parser.add_argument('--test_noisy_key', dest='noisy_key', type=str, help='key for noisy matlab image for testing')
parser.add_argument('--test_clean_img', dest='clean_img', type=str, help='path of the clean image for testing')
parser.add_argument('--test_clean_key', dest='clean_key', type=str, help='key for clean matlab image for testing')
parser.add_argument('--test_flip', dest='flip', action='store_true', help='flip the noisy (and clean) test image upside down')
#parser.add_argument('--test_ckpt_index', dest='ckpt_index', type=str, default='', help='name and directory of the checkpoint that will be restored.')
parser.add_argument('--save_dir', dest='save_dir', type=str, default='./data1/', help='dir of patches')
parser.add_argument('--exp_file', dest='exp_file', type=str, help='experiment file')
parser.add_argument('--nb_iteration', dest='nb_iteration', type=int, default=3, help='number of iterations for the de-noising operation')
parser.add_argument('--isDebug', dest='isDebug', action='store_true')
parser.add_argument('--patch_size', dest='patch_size', type=int, default=50, help='size of the square patches')
parser.add_argument('--stride', dest='stride', type=int, default=50, help='stride between patches')
parser.add_argument('--step', dest='step', type=int, default=0, help='offset of the first patch')
parser.add_argument('--phase_type', dest='phase_type', type=str, default="two", help='phase type (e.g. "two")')
parser.add_argument('--patch_per_image', dest='patch_per_image', type=int, default=384, help='number of patches kept per image')
parser.add_argument('--noise_src_dir', dest='noise_src_dir', type=str, default="./chemin/", help='directory of noisy source images')
parser.add_argument('--clean_src_dir', dest='clean_src_dir', type=str, default="./chemin/", help='directory of clean source images')
parser.add_argument('--perform_validation', dest='perform_validation', action="store_true")
parser.add_argument('--scales', dest='scales', type=int, nargs='+', default=[1], help='scales applied to the original images when generating patches')
parser.add_argument('--originalsize', dest='originalsize', type=int, nargs='+', default=(1024, 1024), help='original size of the images')
return parser.parse_args()
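Note that the noise-level arguments (train_noises, eval_noises, test_noises) are dash-separated strings. Below is a minimal sketch of turning one into numeric values; the parse_noise_levels helper is hypothetical (the project presumably handles this in utils.py):
```python
# Hypothetical helper (the project's own parsing presumably lives in
# utils.py): convert a dash-separated noise string such as "0-1-1.5-2-2.5"
# into a list of floats.
import argument

def parse_noise_levels(noise_str):
    return [float(level) for level in noise_str.split('-')]

args = argument.parse()
print(parse_noise_levels(args.train_noises))  # e.g. [0.0, 1.0, 1.5, 2.0, 2.5]
```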
......@@ -12,7 +12,7 @@ class NoisyBSDSDataset(td.Dataset):
"""
def __init__(self, image_mode, image_size):
""" Initialize the data loader
Arguments:
......@@ -25,17 +25,8 @@ class NoisyBSDSDataset(td.Dataset):
self.image_mode = image_mode
self.image_size = image_size
def __len__(self):
return len(self.clean)
......@@ -49,6 +40,7 @@ class NoisyBSDSDataset(td.Dataset):
def __getitem__(self, idx):
cleanSample = self.clean[idx]
noisySample = self.noisy[idx]
......@@ -59,3 +51,69 @@ class NoisyBSDSDataset(td.Dataset):
noisySample = torch.Tensor(noisySample)
return noisySample, cleanSample
class TrainDataset(NoisyBSDSDataset):
def __init__(self, clean, noisy, image_mode, image_size):
""" Initialize the data loader
Arguments:
clean(String) : The path of clean data
noisy(String) : The path of noisy data
image_mode(int) : The number of channels of the clean and noisy data
image_size((int, int)) : The size (in pixels) of clean and noisy data
"""
super(TrainDataset, self).__init__(image_mode, image_size)
self.training_name = noisy
print("clean : ", clean)
print("noisy : ", noisy)
self.clean, self.noisy = load_train_data(filepath=clean, noisyfilepath=noisy, phase_type="two")
rdm = np.random.randint(0, 2, self.clean.shape[0])
self.clean = normalize_data(self.clean, 'two', rdm, True)
self.noisy = normalize_data(self.noisy, 'two', rdm, True)
rotation = 8
self.clean = rotate_data(self.clean, rotation)
self.noisy = rotate_data(self.noisy, rotation)
print("data_size : ", self.clean.shape)
print("data_type : ", type(self.clean))
class EvalDataset(NoisyBSDSDataset):
def __init__(self, eval_dir, noises, patterns, image_mode, image_size):
""" Initialize the data loader
Arguments:
eval_dir(String) : The directory of the eval database
noises(String) : The noise levels used for eval
patterns(list of int) : The patterns used for eval
image_mode(int) : The number of channels of the clean and noisy data
image_size((int, int)) : The size (in pixels) of clean and noisy data
"""
super(EvalDataset, self).__init__(image_mode, image_size)
self.training_name = eval_dir + "-".join([str(pattern) for pattern in patterns]) + "/" + noises
self.clean, self.noisy = from_DATABASE(eval_dir, noises, patterns, False)
self.clean = np.array(self.clean)
self.noisy = np.array(self.noisy)
self.clean = self.clean.reshape(-1, self.image_size[0], self.image_size[1], self.image_mode)
self.noisy = self.noisy.reshape(-1, self.image_size[0], self.image_size[1], self.image_mode)
print("data_size : ", self.clean.shape)
print("data_type : ", type(self.clean))
......@@ -28,6 +28,7 @@ Copyright 2019-2020 Marie Tahon
"""
import argparse
import argument
#import re
#import glob
import pathlib
......@@ -37,7 +38,7 @@ import PIL
#import random
import numpy as np
from utils import *
#from hparams import hparams, hparams_debug_string
from scipy.io import loadmat
__license__ = "LGPL"
......@@ -66,10 +67,11 @@ parser.add_argument('--params', dest='params', type=str, default='', help='hyper
# check output arguments
#parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
#parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
#args = parser.parse_args()
#print(args.params['patch_size'])
#hparams.parse(args.params)
args = argument.parse()
#def from_DATABASE():
......@@ -79,9 +81,9 @@ hparams.parse(args.params)
def generate_patches(isDebug=True):
#global DATA_AUG_TIMES = 1
#hparams.patch_size = args.pat_size
# print(hparams_debug_string())
#filepaths, noisyfilepaths = from_HOLODEEP(hparams.noise_src_dir, hparams.clean_src_dir, hparams.train_noise, hparams.train_patterns, path_only=True)
filepaths, noisyfilepaths = from_NATURAL(args.noise_src_dir, args.clean_src_dir, path_only=True)
if isDebug:
filepaths = filepaths[:10]
noisyfilepaths = noisyfilepaths[:10]
......@@ -89,7 +91,7 @@ def generate_patches(isDebug=True):
#print(noisyfilepaths)
#exit()
print("number of clean training data {0} and noisy {1}".format( len(filepaths), len(noisyfilepaths)))
scales = args.scales #old version [1, 0.9, 0.8, 0.7]
# calculate the number of patches
#we assume that all images have the same size
......@@ -103,7 +105,7 @@ def generate_patches(isDebug=True):
#print(filepaths[i])
#img = Image.open(filepaths[i]).convert('L') # convert RGB to gray, no need to convert: grayscale
for s in range(len(scales)):
newsize = (int(args.originalsize[0] * scales[s]), int(args.originalsize[1] * scales[s]))
d[newsize]=d.get(newsize,0)+1
#no need to actually rebuild the image, we just need its dimensions
#img_s = img.resize(newsize, resample=PIL.Image.BICUBIC) # do not change the original img
......@@ -112,27 +114,27 @@ def generate_patches(isDebug=True):
#for x in range(0 + hparams.step, (im_h - hparams.patch_size), hparams.stride):
# for y in range(0 + hparams.step, (im_w - hparams.patch_size), hparams.stride):
# count += 1
count1 += int((im_h-args.patch_size)/args.stride) * int((im_w-args.patch_size)/args.stride)
count = len(scales)*len(filepaths)*args.patch_per_image
print("total number of patches for all taining images = ", count1, " and used patches = ", count)
origin_patch_num = count * DATA_AUG_TIMES
for size in d:
print("%i images in size %s" % (d[size], size))
if origin_patch_num % args.batch_size != 0:
#if origin_patch_num > hparams.batch_size:
numPatches = int((origin_patch_num / args.batch_size + 1) * args.batch_size)
else:
numPatches = origin_patch_num
#numPatches = int(numPatches)
print ("total patches = %d , batch size = %d, total batches = %d" % (numPatches, hparams.batch_size, numPatches / hparams.batch_size))
print ("total patches = %d , batch size = %d, total batches = %d" % (numPatches, args.batch_size, numPatches / args.batch_size))
# data matrix 4-D
inputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1), dtype="uint8")
noisyinputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1), dtype="uint8")
print("Shape of input (including noisy) : ", inputs.shape)
cpt_img_scale = 0
......@@ -155,26 +157,26 @@ def generate_patches(isDebug=True):
for j in range(DATA_AUG_TIMES):
im_h, im_w, _ = img_s.shape
cpt = 0
indPatch_x = range(0 + args.step, im_h - args.patch_size, args.stride)
indPatch_y = range(0 + args.step, im_w - args.patch_size, args.stride)
#numPatch_per_img = int((im_h-hparams.patch_size)/hparams.stride) * int((im_w-hparams.patch_size)/hparams.stride)
numPatch_per_img = len(indPatch_x) * len(indPatch_y)
inputs_img_scale = np.zeros((numPatch_per_img, args.patch_size, args.patch_size, 1), dtype = 'uint8')
noisyinputs_img_scale = np.zeros((numPatch_per_img, args.patch_size, args.patch_size, 1), dtype = 'uint8')
#print(inputs_img_scale.shape)
for x in indPatch_x:
for y in indPatch_y:
#depending on the mode: 0 normal, 1 flip up/down, 2 rotate 90, ...
#inputs[count, :, :, :] = data_augmentation(img_s[x:x + args.pat_size, y:y + args.pat_size, :], np.random.randint(0, 7))
#here we just keep the normal (unaugmented) version of the image
inputs_img_scale[cpt,: ,: ,:] = img_s[x:x + args.patch_size, y:y + args.patch_size, :]
noisyinputs_img_scale[cpt,: ,: ,:] = noisyimg_s[x:x + args.patch_size, y:y + args.patch_size, :]
cpt += 1
#shuffle the different patches of an image similarly on noisy and clean images
perm_idx = np.random.permutation(cpt)[:args.patch_per_image]
#print("perm_idx", perm_idx.shape, perm_idx)
inputs[cpt_img_scale: cpt_img_scale + args.patch_per_image, :, :, :] = inputs_img_scale[perm_idx, :, :, :]
noisyinputs[cpt_img_scale:cpt_img_scale + args.patch_per_image, :, :, :] = noisyinputs_img_scale[perm_idx, :, :, :]
cpt_img_scale += 1
del img, noisyimg
......@@ -201,7 +203,7 @@ def generate_patches(isDebug=True):
print('shape of inputs: ', inputs.shape)
print('amplitude of inputs: ', np.max(inputs), np.min(inputs))
sess_name = extract_sess_name(args.train_patterns, args.train_noises, args.phase_type, args.stride, args.patch_size, args.patch_per_image)
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
np.save(os.path.join(args.save_dir, "img_clean_train_" + sess_name), inputs)
......@@ -210,4 +212,4 @@ def generate_patches(isDebug=True):
if __name__ == '__main__':
generate_patches(args.isDebug)
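As a sanity check on the patch arithmetic in generate_patches above, here is a worked example with assumed values (1024x1024 originals, the default patch settings, DATA_AUG_TIMES = 1, and 25 images as mentioned in the README):
```python
# Worked example of the patch arithmetic above, with assumed values:
# 1024x1024 images, patch_size=50, stride=50, batch_size=384,
# patch_per_image=384, one scale, 25 images, DATA_AUG_TIMES=1.
patch_size, stride, batch_size, patch_per_image = 50, 50, 384, 384
im_h = im_w = 1024
nb_images = 25

count1 = int((im_h - patch_size) / stride) * int((im_w - patch_size) / stride) * nb_images
count = 1 * nb_images * patch_per_image  # patches actually kept
num_patches = count if count % batch_size == 0 else int((count / batch_size + 1) * batch_size)
print(count1, count, num_patches)        # 9025 9600 9600
```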
......@@ -28,6 +28,7 @@ Copyright 2019-2020 Marie Tahon
"""
import argparse
import argument
#import re
#import glob
import pathlib
......@@ -37,7 +38,7 @@ import PIL
#import random
import numpy as np
from utils import *
#from hparams import hparams, hparams_debug_string
from scipy.io import loadmat
__license__ = "LGPL"
......@@ -68,44 +69,45 @@ parser.add_argument('--params', dest='params', type=str, default='', help='hyper
#parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
args = parser.parse_args()
#print(args.params['patch_size'])
#hparams.parse(args.params)
#import ipdb
args = argument.parse()
def generate_patches(isDebug=True):
#global DATA_AUG_TIMES = 1
#hparams.patch_size = args.pat_size
# print(hparams_debug_string())
#filepaths = [x for x in src_path.glob('*.tiff')] #('*.mat')
#noisyfilepaths = [x for x in noisy_path.glob('*.tiff')] #('*.mat')
cleanmat, noisymat = from_DATABASE(args.train_dir, args.train_noises, args.train_patterns)
#ipdb.set_trace()
print("number of clean training data {0} and noisy {1}".format( len(cleanmat), len(noisymat)))
scales = 1 #and we do not change it! (was hparams.scales) #old version [1, 0.9, 0.8, 0.7]
if args.patch_size > args.originalsize[0]:
sys.exit('patch size > size of original size of images')
nb_image = len(cleanmat)
nb_patch_per_image = int((args.originalsize[0] - args.patch_size)/args.stride + 1) * int((args.originalsize[1] - args.patch_size)/args.stride + 1) #(1024 - 50)/50 + 1 = 20 -> 20*20 = 400 patch per img
nb_origin_patch = nb_patch_per_image * nb_image
nb_final_patch = args.patch_per_image * nb_image
print("total number of patches for all taining images = ", nb_origin_patch, " and used patches = ", nb_final_patch)
if nb_final_patch % args.batch_size != 0:
#if origin_patch_num > hparams.batch_size:
numPatches = int(nb_final_patch / args.batch_size + 1) * args.batch_size
else:
numPatches = nb_final_patch
print ("total patches = %d , batch size = %d, total batches = %d" % (numPatches, hparams.batch_size, numPatches / hparams.batch_size))
print ("total patches = %d , batch size = %d, total batches = %d" % (numPatches, args.batch_size, numPatches / args.batch_size))
# data matrix 4-D
cleaninputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1))
noisyinputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1))
print("Shape of input (including noisy) : ", cleaninputs.shape)
#ipdb.set_trace()
cpt_img_scale = 0
......@@ -128,23 +130,23 @@ def generate_patches(isDebug=True):
# for j in range(DATA_AUG_TIMES):
# im_h, im_w, _ = img_s.shape
cpt = 0
inputs_img_scale = np.zeros((nb_patch_per_image, args.patch_size, args.patch_size, 1))
noisyinputs_img_scale = np.zeros((nb_patch_per_image, args.patch_size, args.patch_size, 1))
for x in range(0 + args.step, args.originalsize[0] - args.patch_size, args.stride):
for y in range(0 + args.step, args.originalsize[1] - args.patch_size, args.stride):
#print(x,y)
#depending on the mode: 0 normal, 1 flip up/down, 2 rotate 90, ...
#inputs[count, :, :, :] = data_augmentation(img_s[x:x + args.pat_size, y:y + args.pat_size, :], np.random.randint(0, 7))
#here we just keep the normal (unaugmented) version of the image
inputs_img_scale[cpt,: ,: ,:] = cleanimg[:, x:x + args.patch_size, y:y + args.patch_size, :]
noisyinputs_img_scale[cpt,: ,: ,:] = noisyimg[:, x:x + args.patch_size, y:y + args.patch_size, :]
cpt += 1
#shuffle the different patches of an image similarly on noisy and clean images
perm_idx = np.random.permutation(cpt)[:args.patch_per_image]
#print("perm_idx", perm_idx.shape, perm_idx)
cleaninputs[cpt_img_scale: cpt_img_scale + args.patch_per_image, :, :, :] = inputs_img_scale[perm_idx, :, :, :]
noisyinputs[cpt_img_scale:cpt_img_scale + args.patch_per_image, :, :, :] = noisyinputs_img_scale[perm_idx, :, :, :]
cpt_img_scale += args.patch_per_image
#del img, noisyimg
#if hparams.phase_type == 'phi':
......@@ -171,7 +173,7 @@ def generate_patches(isDebug=True):
print('shape of inputs: ', cleaninputs.shape)
print('amplitude of inputs: ', np.max(cleaninputs), np.min(cleaninputs))
sess_name = extract_sess_name(args.train_patterns, args.train_noises, args.phase_type, args.stride, args.patch_size, args.patch_per_image)
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
np.save(os.path.join(args.save_dir, "img_clean_train_" + sess_name), cleaninputs)
......@@ -180,4 +182,4 @@ def generate_patches(isDebug=True):
if __name__ == '__main__':
generate_patches(args.isDebug)
......@@ -8,10 +8,9 @@ import nntools as nt
from model import *
from data import *
from argument import *
#from main_holo_tsf import test_main
def save_clean_pred_rad(args, exp, clean_pred_rad, noisy, clean, nom_img = "NoisyPhasePred"):
"""This method is used to save the result of a de-noising operation
Arguments:
......@@ -23,7 +22,7 @@ def save_clean_pred_rad(args, exp, clean_pred_rad, noisy, clean, nom_img = "Nois
nom_img (str, optional) : The saving name for the result
"""
save_name = os.path.join("TestImages", os.path.basename(os.path.normpath(args.input_dir)))
save_name = os.path.join(args.save_test_dir, os.path.basename(os.path.normpath(args.input_dir)))
if not os.path.exists(save_name):
os.makedirs(save_name)
......@@ -40,12 +39,11 @@ def save_clean_pred_rad(args, exp, clean_pred_rad, noisy, clean, nom_img = "Nois
psnr = cal_psnr(rad_to_flat(clean_pred_rad), rad_to_flat(clean))
std = cal_std_phase(clean_pred_rad, clean)
print("\n\n")
print("\n")
print("epoch : ", ep