Commit 66a41d35 authored by Marie Tahon

update

parent a3e852e4
@@ -47,16 +47,17 @@ Modifiable parameters are located in hparams.py
```
## Localization of data and generation of patches
Data is either .tiff images or .mat MATLAB matrices.
MATLAB images for training and development purposes are given in HOLODEEPmat.zip file.
MATLAB images for evaluation purposes are given in DATAEVAL.zip file.
Data is either .tiff images or .mat MATLAB matrices. Three databases are available for the moment:
* HOLODEEPmat: MATLAB images for training and development purposes: 5 patterns and 5 noise levels, 1024x1024 pixels
* DATAEVAL: 3 MATLAB images for evaluation purposes: data1, data20 and VibMap.
* NATURAL: 400 black-and-white images for image denoising; noisy images are obtained with additive Gaussian noise: 180x180 pixels
Clean reference data is located in hparams.clean_src_dir
Noisy input data is located in hparams.noisy_src_dir
Images are referred by their name according to their pattern number (hparams.train_patterns from 1 to 5), their noise level (hparams.train_noise 0, 1, 1.5, 2 or 2.5). Images for train and developpement (or test) and final evaluation (eval) are given by "train", "test" or "eval" suffix.
Images from HOLODEEP are referred to by their pattern number (hparams.train_patterns, from 1 to 5) and their noise level (hparams.train_noise: 0, 1, 1.5, 2 or 2.5). Images for training, development (or test) and final evaluation (eval) are identified by the "train", "test" or "eval" suffix.
All phase data is normalized between 0 and 1 for being in agreement with the output of the network. A propotional coefficient is applied on the predicted image to rescale the pahse amplitude between -pi and pi.
All phase data is converted using sine or cosine functions and normalized between 0 and 1 to match the output range of the network. A proportional coefficient is applied to the predicted image to rescale the phase amplitude between -pi and pi.
The patch size can be given as a command-line argument, whether the .tiff images are all in the same directory or the .mat files are given in separate directories.
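A minimal sketch of this normalization convention (hypothetical helper names, assuming the linear mapping described above):
```
import numpy as np

def normalize_phase(phase, mode='cos'):
    # map a phase image in [-pi, pi] to [0, 1] through its cosine or sine
    trig = np.cos(phase) if mode == 'cos' else np.sin(phase)
    return (trig + 1.0) / 2.0  # [-1, 1] -> [0, 1]

def rescale_prediction(pred):
    # rescale a network output in [0, 1] back to a phase amplitude in [-pi, pi]
    return pred * 2.0 * np.pi - np.pi
```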
```
@@ -72,7 +73,8 @@ To train the CNN, use the following command, where:
* checkpoint_dir is the directory where checkpoints will be saved (intermediate and final weights of the model)
* sample_dir is the directory where result files are stored; at each new run, a subdirectory named with the timestamp of the run is created, containing the denoised output images and the res file
* params is an optional argument to override parameters from hparams.py
* save_dir is where are located numpy matrices used for training the network.
* save_dir is where the numpy matrices used for training the network are located (generated by the generate_patches_holo script).
* Data augmentation (x8) is done before batch creation. It consists in considering the cosine and sine versions of the phase image, together with their transposed and phase-shifted (pi/4) variants; see the sketch below.
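One plausible implementation of this x8 augmentation (2 encodings x 2 orientations x 2 shifts; the helper name is hypothetical):
```
import numpy as np

def augment_phase_x8(phase):
    # 8 variants: {no shift, pi/4 shift} x {identity, transpose} x {cos, sin}
    variants = []
    for shift in (0.0, np.pi / 4):
        shifted = np.angle(np.exp(1j * (phase + shift)))  # keep values in [-pi, pi]
        for img in (shifted, shifted.T):
            variants.append(np.cos(img))
            variants.append(np.sin(img))
    return variants
```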
```
python main_holo.py --checkpoint_dir /lium/raid01_c/tahon/holography/checkpoints/ --sample_dir /lium/raid01_c/tahon/holography/eval_samples/ --params "phase=train" --save_dir "./data1/"
@@ -80,8 +82,11 @@ python main_holo.py --checkpoint_dir /lium/raid01_c/tahon/holography/checkpoints
## Test
To test the model located at `/lium/raid01_c/tahon/holography/checkpoints/run-test2020-04-12_12\:14\:29.082341/`, use one of the following commands. The first one is the detailed Python command, while the second one runs the job on the cluster via Slurm (test on DATAEVAL and HOLODEEP).
```
python main_holo.py --params "phase=test" --test_noisy_img /lium/raid01_c/tahon/holography/HOLODEEPmat/PATTERN1/MFH_0/NoisyPhase.mat --test_clean_img /lium/raid01_c/tahon/holography/HOLODEEPmat/PATTERN1/PhaseDATA.mat --test_flip True --test_ckpt_index /lium/raid01_c/tahon/holography/checkpoints/run-test2019-10-15_10\:24\:08.363424/DnCNN-tensorflow-4200.meta
python main_holo.py --params "phase=test" --test_noisy_img /lium/raid01_c/tahon/holography/HOLODEEPmat/PATTERN1/MFH_0/NoisyPhase.mat --test_noisy_key 'NoisyPhase' --test_clean_img /lium/raid01_c/tahon/holography/HOLODEEPmat/PATTERN1/PhaseDATA.mat --test_clean_key 'Phase' --test_flip False --test_ckpt_index /lium/raid01_c/tahon/holography/checkpoints/run-test2020-04-12_12\:14\:29.082341/
./run_holo_test.sh /lium/raid01_c/tahon/holography/checkpoints/run-test2020-04-12_12\:14\:29.082341/
```
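The --test_noisy_key and --test_clean_key arguments name the MATLAB variable to read from each .mat file ('NoisyPhase' and 'Phase' for HOLODEEP, 'Phaseb' and 'Phase' for DATA_1, DATA_20 and VibMap). A minimal equivalent of the loading step, assuming load_test_data wraps scipy.io.loadmat:
```
from scipy.io import loadmat
import numpy as np

def load_mat_image(path, key, flipupdown=False):
    # read one named variable from a MATLAB file, optionally flipped upside down
    img = loadmat(path)[key].astype(np.float32)
    return np.flipud(img) if flipupdown else img
```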
## Results
@@ -95,5 +100,6 @@ Epochs | lr | train noise | nb batches | layers | duration | 0 | 1 |
```
## TODO
- [ ] check final evaluation phase
- [x] check final evaluation phase
- [ ] check loading checkpoint (but not latest) from a previous model
- [ ] move to PyTorch
@@ -80,8 +80,8 @@ def generate_patches(isDebug=True):
#global DATA_AUG_TIMES = 1
#hparams.patch_size = args.pat_size
print(hparams_debug_string())
filepaths, noisyfilepaths = from_HOLODEEP(hparams.noise_src_dir, hparams.clean_src_dir, hparams.train_noise, hparams.train_patterns, path_only=True)
#filepaths, noisyfilepaths = from_NATURAL(hparams.noise_src_dir, hparams.clean_src_dir, path_only=True)
#filepaths, noisyfilepaths = from_HOLODEEP(hparams.noise_src_dir, hparams.clean_src_dir, hparams.train_noise, hparams.train_patterns, path_only=True)
filepaths, noisyfilepaths = from_NATURAL(hparams.noise_src_dir, hparams.clean_src_dir, path_only=True)
if isDebug:
filepaths = filepaths[:10]
noisyfilepaths = noisyfilepaths[:10]
@@ -46,31 +46,31 @@ hparams = tf.contrib.training.HParams(
#clean_src_dir = '/lium/raid01_c/tahon/holography/NATURAL/original',
#eval_dir = '/lium/raid01_c/tahon/holography/HOLODEEPmat/',
#test_dir = 'lium/raid01_c/tahon/holography/TEST/',
phase = 'train', #train or test phase
phase = 'test', #train or test phase
#image
isDebug = False, #True, #create only 10 patches
originalsize = (128,128), #1024 for matlab database, 128 for holodeep database, 180 for natural images
originalsize = (1024,1024), #1024 for matlab database, 128 for holodeep database, 180 for natural images
phase_type = 'two', #keep phase between -pi and pi (phi), convert into cosine (cos) or sine (sin)
#select images for training
train_patterns = [1, 2, 3], #number of images from 1 to 5
train_noise = [0], #[0, 1, 1.5, 2, 2.5],
train_patterns = [1, 2, 3, 4, 5], #number of images from 1 to 5
train_noise = '0', #[0, 1, 1.5, 2, 2.5],
#select images for evaluation (during training)
eval_patterns = [1, 2, 3, 4, 5],
eval_noise = [0, 1, 1.5, 2, 2.5],
eval_noise = '0-1-1.5-2-2.5',
#select images for testing
test_patterns = [1, 2, 3, 4, 5],
test_noise = [0, 1, 1.5, 2, 2.5],
test_noise = '0-1-1.5-2-2.5',
noise_type = 'spkl', #type of noise: speckle or gaussian (spkl|gauss)
sigma = 25, #noise level for gaussian denoising
#Training
nb_layers = 4,#original number is 16
batch_size = 64,#128
patch_per_image = 9, #9 for 180*180 images (NATURAL); Silvio used 384 for 1024*1024 images (MATLAB)
batch_size = 128,#128
patch_per_image = 384, #9 for 180*180 images (NATURAL); Silvio used 384 for 1024*1024 images (MATLAB)
patch_size = 50, #Silvio used 50.
epoch = 2000,#2000
lr = 0.001, # learning rate
epoch = 350,#2000
lr = 0.0005, # learning rate
stride = 50, # spatial step for cropping images (value in the initial script: 10)
step = 0, #initial spatial step for cropping
scales = [1] #[1, 0.9, 0.8, 0.7] # scale for data augmentation
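# note: noise levels are now dash-separated strings; downstream code
# (from_HOLODEEP, from_DATABASE in utils) recovers the levels and the
# file suffixes with, e.g.:
#   levels = '0-1-1.5-2-2.5'.split('-')               # ['0', '1', '1.5', '2', '2.5']
#   suffixes = [s.replace('.', 'p') for s in levels]  # ['0', '1', '1p5', '2', '2p5']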
@@ -53,7 +53,9 @@ parser.add_argument('--sample_dir', dest='sample_dir', default='./sample', help=
parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test sample are saved here')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
parser.add_argument('--test_noisy_img', dest='noisy_img', type=str, default='', help='name and directory of the noisy image for testing')
parser.add_argument('--test_noisy_key', dest='noisy_key', type=str, default='', help='name of the key for noisy matlab image for testing')
parser.add_argument('--test_clean_img', dest='clean_img', type=str, default='', help='name and directory of the clean image for testing (possibly none)')
parser.add_argument('--test_clean_key', dest='clean_key', type=str, default='', help='name of the key for clean matlab image for testing (possibly none)')
parser.add_argument('--test_flip', dest='flip', type=bool, default=False, help='option for upside down flip of noisy (and clean) test image')
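# note: argparse's type=bool returns True for any non-empty string, so passing
# '--test_flip False' on the command line still yields flip=True; a stricter
# converter such as type=lambda s: s.lower() in ('true', '1') avoids this.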
parser.add_argument('--test_ckpt_index', dest='ckpt_index', type=str, default='', help='name and directory of the checkpoint that will be restored.')
parser.add_argument('--save_dir', dest='save_dir', default='./data1/', help='dir of patches')
@@ -61,6 +63,8 @@ args = parser.parse_args()
#ipdb.set_trace()
hparams.parse(args.params)
#hparams.train_noise = hparams.train_noise.split('-')
def denoiser_train(denoiser, lr):
#with load_data the images are already normalized by 255.0
@@ -85,13 +89,15 @@ def denoiser_train(denoiser, lr):
def denoiser_test(denoiser):
noisy = load_test_data(args.noisy_img, key = 'NoisyPhase', flipupdown = args.flip)
#noisy = load_test_data(args.noisy_img, key = 'Phaseb', flipupdown = args.flip) #for vibPhase
#key = 'NoisyPhase' and 'Phase' for HOLODEEP
#key = 'Phaseb' and 'Phase' for DATA_1, DATA_20 and VibMap
print('test keys (noisy, clean):', args.noisy_key, args.clean_key)
noisy = load_test_data(args.noisy_img, key = args.noisy_key, flipupdown = args.flip) #for DATA_1, DATA_20 and VibPhase
print('load noisy ref')
if args.clean_img:
print('load clean ref')
clean = load_test_data(args.clean_img, key = 'Phase', flipupdown = args.flip)
clean = load_test_data(args.clean_img, key = args.clean_key, flipupdown = args.flip)
else:
clean = noisy
test_files = (clean, noisy)
@@ -102,7 +108,7 @@ def denoiser_test(denoiser):
save_dir = test_dir + '/' + sess_name + '/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
denoiser.test(test_files, ckpt_index=args.ckpt_index, save_dir=save_dir, save_name=test_name, phase_type= hparams.phase_type)
denoiser.test(test_files, ckpt_dir=args.ckpt_index, save_dir=save_dir, save_name=test_name, phase_type= hparams.phase_type)
def main(_):
@@ -2,10 +2,11 @@ import time
import tensorflow as tf
from utils import *
from datetime import datetime
from hparams import hparams_debug_string
from hparams import hparams, hparams_debug_string
import numpy as np
#import ipdb
def _activation(x):
y = tf.nn.relu(x + np.pi)
z = tf.nn.relu(2 * np.pi - y) - np.pi
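# i.e. z = -x clipped to [-pi, pi]: a saturating activation that keeps values in the phase range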
@@ -20,11 +21,30 @@ def loss_function(ref, pred, phase_type):
return tf.math.reduce_sum(ref - pred)
def dncnn16(input, is_training=True, output_channels=1):
#print("nb of layers for training is %d" %(NB_LAYERS))
with tf.variable_scope('block1'):
output = tf.layers.conv2d(input, 64, 3, padding='same', activation=tf.nn.relu) #note: it was tf.nn.relu before
for layers in range(2, 16 + 1): #4 + 1): #16 + 1
with tf.variable_scope('block%d' % layers):
output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False)
output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
#output = tf.nn.sigmoid(tf.layers.batch_normalization(output, training=is_training))
#output = tf.nn.tanh(tf.layers.batch_normalization(output, training=is_training))
with tf.variable_scope('block17'):
output = tf.layers.conv2d(output, output_channels, 3, padding='same')
res = input - output
#res = _activation(input - output)
return res #% (2 * np.pi) - 2* np.pi# input-output
def dncnn(input, nb_layers=16, is_training=True, output_channels=1):
def dncnn4(input, is_training=True, output_channels=1):
#print("nb of layers for training is %d" %(NB_LAYERS))
with tf.variable_scope('block1'):
output = tf.layers.conv2d(input, 64, 3, padding='same', activation=tf.nn.relu) #note: it was tf.nn.relu before
for layers in range(2, nb_layers + 1): #4 + 1): #16 + 1
for layers in range(2, 4 + 1): #4 + 1): #16 + 1
print("->layers", layers)
with tf.variable_scope('block%d' % layers):
output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False)
output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
@@ -43,7 +63,7 @@ class denoiser(object):
self.sess = sess
self.input_c_dim = input_c_dim
self.sigma = sigma
self.nb_layers = nb_layers
#self.nb_layers = nb_layers #tf.placeholder(tf.int32 , name = 'nb_layers')
# build model
self.Y_ = tf.placeholder(tf.float32, [None, None, None, self.input_c_dim], name='clean_image')# tf.placeholder(dtype, shape= .., name = ..)
self.is_training = tf.placeholder(tf.bool, name='is_training')
@@ -56,7 +76,13 @@
else:
print('noise type does not exist')
sys.exit()
self.Y = dncnn(self.X, self.nb_layers, is_training=self.is_training)#predict residual from noisy input
if nb_layers == 16:
self.Y = dncnn16(self.X, is_training=self.is_training)#predict residual from noisy input
elif nb_layers == 4:
self.Y = dncnn4(self.X, is_training=self.is_training)#predict residual from noisy input
else:
sys.exit("wrong number of layers")
#self.loss = (1.0 / batch_size) * tf.nn.l2_loss(tf.clip_by_value(self.Y, -np.pi, np.pi) - self.Y_)#loss between clean ref and clean pred
self.loss = (1.0 / batch_size) * tf.nn.l2_loss(self.Y - self.Y_)#loss between clean ref and clean pred
#self.loss = (1.0 / batch_size) * loss_function(self.Y_, self.Y, phase_type)
@@ -136,21 +162,21 @@
#old version where X is created from Y_ by gaussian noise addition
#output_clean_image, noisy_image, psnr = self.sess.run([self.Y, self.X, self.eva_psnr], feed_dict={self.Y_: data_clean, self.is_training: False})
clean_pred, psnr = self.sess.run([self.Y, self.eva_psnr], feed_dict={self.Y_: data_clean, self.X: data_noisy, self.is_training: False})
clean_pred, psnr = self.sess.run([self.Y, self.eva_psnr], feed_dict={self.Y_: data_clean, self.X: data_noisy, self.is_training: False})
return clean_pred, psnr
def train(self, data, eval_data, batch_size, ckpt_dir, epoch, lr, sample_dir, phase_type, nb_layers, eval_every_epoch=2):
def train(self, data, eval_data, batch_size, ckpt_dir, epoch, lr, sample_dir, phase_type, nb_layers, eval_every_epoch=5):
phase_augmentation = True
ckpt_dir_ = ckpt_dir
#ckpt_dir_ = ckpt_dir
sess_name = 'run-test' + str(datetime.now()).replace(' ', '_')
ckpt_dir = ckpt_dir_ + '/' + sess_name + '/'
ckpt_dir_ = ckpt_dir + '/' + sess_name + '/'
sample_dir = sample_dir + '/' + sess_name + '/'
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
if not os.path.exists(ckpt_dir_):
os.makedirs(ckpt_dir_)
if not os.path.isdir(sample_dir):
os.makedirs(sample_dir)
fres = open(sample_dir + '/res', 'w')
fres.write('checkpoint directory is %s \n' %(ckpt_dir))
fres.write('checkpoint directory is %s \n' %(ckpt_dir_))
fres.write('session name is %s\n' %(sess_name))
fres.write(hparams_debug_string())
#train data is loaded from npy files generated with patches. values are given in terms of phase between -pi and pi
@@ -180,8 +206,10 @@
numBatch = int(numPatch / batch_size)
fres.write('Nb of batches is :%d \n' %(numBatch))
print('Nb of batches is :', numBatch)
print('Nb of layers is:', nb_layers)
# load pretrained model
load_model_status, global_step = self.load(ckpt_dir_)
print(ckpt_dir)
load_model_status, global_step = self.load(ckpt_dir)
if load_model_status:
iter_num = global_step
start_epoch = global_step // numBatch
@@ -202,11 +230,14 @@
merged = tf.summary.merge_all()
summary_psnr = tf.summary.scalar('eva_psnr', self.eva_psnr)
fres.write("[*] Start training, with start epoch %d start iter %d\n " % (start_epoch, iter_num))
fres.close()
#fres.close()
print("[*] Start training, with start epoch %d start iter %d : " % (start_epoch, iter_num))
start_time = time.time()
#ipdb.set_trace()
self.evaluate(iter_num, eval_data, sample_dir=sample_dir, summary_merged=summary_psnr, summary_writer=writer, sess_name=sess_name, phase_type=phase_type) # eval_data value range is 0-255
print("[*] Initial evaluation is not done for debug purposes")
fres.write("[*] Initial evaluation is not done for debug purposes\n")
fres.close()
#self.evaluate(iter_num, eval_data, sample_dir=sample_dir, summary_merged=summary_psnr, summary_writer=writer, sess_name=sess_name, phase_type=phase_type, nb_layers=nb_layers) # eval_data value range is 0-255
for epoch in range(start_epoch, epoch):
#np.random.shuffle(data) #no shuffle for the moment
#shuffle target and source synchronously with random permutation at each epoch.
@@ -220,7 +251,7 @@
batch_images_noisy = data_noisy[batch_id * batch_size:(batch_id + 1) * batch_size, :, :, :]
fres.write('max/min train: %.2f %.2f (clean), %.2f %.2f (noisy)\n' %(np.max(batch_images_clean), np.min(batch_images_clean), np.max(batch_images_noisy), np.min(batch_images_noisy)))
# batch_images = batch_images.astype(np.float32) / 255.0 # normalize the data to 0-1
_, loss, summary = self.sess.run([self.train_op, self.loss, merged], feed_dict={self.X: batch_images_noisy, self.Y_: batch_images_clean, self.lr: lr[epoch], self.nb_layers: nb_layers, self.is_training: True})
_, loss, summary = self.sess.run([self.train_op, self.loss, merged], feed_dict={self.X: batch_images_noisy, self.Y_: batch_images_clean, self.lr: lr[epoch], self.is_training: True})
fres.write("Epoch: [%2d] [%4d/%4d] time: %4.4f, loss: %.6f\n"% (epoch + 1, batch_id + 1, numBatch, time.time() - start_time, loss))
print("Epoch: [%2d] [%4d/%4d] time: %4.4f, loss: %.6f"% (epoch + 1, batch_id + 1, numBatch, time.time() - start_time, loss))
iter_num += 1
@@ -229,7 +260,7 @@
self.evaluate(iter_num, eval_data, sample_dir=sample_dir, summary_merged=summary_psnr,
summary_writer=writer, sess_name=sess_name, phase_type=phase_type) # eval_data value range is 0-255
self.save(iter_num, ckpt_dir)
self.save(iter_num, ckpt_dir_)
fres.write("[*] Finish training.\n")
fres.close()
print("[*] Finish training.")
@@ -262,6 +293,7 @@
def load(self, checkpoint_dir):
#else load from the latest checkpoint
print("[*] Reading checkpoint...")
print(checkpoint_dir)
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
@@ -273,11 +305,11 @@
else:
return False, 0
def test(self, test_data, ckpt_index, save_dir, save_name, phase_type):
def test(self, test_data, ckpt_dir, save_dir, save_name, phase_type):
"""Test DnCNN"""
#check inputs
#ckpt, _ = os.path.splitext(ckpt_index)
assert os.path.exists(ckpt_index) == 1, 'No checkpoint here!'
assert os.path.exists(ckpt_dir) == 1, 'No checkpoint here!'
N = len(test_data)
assert N != 0, 'No testing data!'
@@ -295,7 +327,8 @@
# init variables
tf.global_variables_initializer().run()
#tf.initialize_all_variables().run()
ckpt_dir, ckpt_model = os.path.split(ckpt_index)
#ckpt_dir, ckpt_model = os.path.split(ckpt_index)
print(ckpt_dir)
load_model_status, global_step = self.load(ckpt_dir)
#load_model_status, global_step = self.load_from_checkpoint(ckpt_index)
@@ -328,8 +361,8 @@
psnr = cal_psnr(clean_ref_rad, clean_pred_rad)
# calculate std on phase
dev = cal_std_phase(clean_ref_rad, clean_pred_rad)
ftest.write("test img PSNR: %.2f, STD: %.2f\n" % (psnr, dev))
print("test img %s PSNR: %.2f, STD: %.2f" % (save_name, psnr, dev))
ftest.write("test img PSNR: %.3f, STD: %.3f\n" % (psnr, dev))
print("test img %s PSNR: %.3f, STD: %.3f" % (save_name, psnr, dev))
save_images(os.path.join(save_dir, '%s.tiff' % (save_name + '-' + str(global_step))), clean_pred_rad)
save_MAT_images(os.path.join(save_dir, '%s.mat' %(save_name + '-' + str(global_step))), clean_pred_rad)
ftest.close()
import numpy as np
import sys
import re
epochPattern = re.compile(r'^Epoch: \[(.*)\] \[')
sessName = sys.argv[1]
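# sessName: path to the 'res' log file produced in sample_dir during training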
imStd = {}
imPsnr = {}
nepochs = 0
imEpoch = []
with open(sessName) as f:
for line in f:
#im = 'img' + str(i)
m = epochPattern.match(line)
if m:
nepochs = m.group(1)
for i in range(1,26):
pattern = re.compile(r'img' + str(i) + ' ')
# print(pattern)
if pattern.search(line):
if i == 1:
imEpoch.append(nepochs)
tab = line.split(' ')
std_ = float(tab[4])
psnr_ = float(tab[2][:-2])
# print(i, std_, psnr_)
if i in imStd:
imStd[i].append(std_)
imPsnr[i].append(psnr_)
else:
imStd[i] = [std_]
imPsnr[i] = [psnr_]
#print(imEpoch)
noise = {0.0: [1, 6, 11, 16, 21],
1.0: [2, 7, 12, 17, 22],
1.5: [3, 8, 13, 18, 23],
2.0: [4, 9, 14, 19, 24],
2.5: [5, 10, 15, 20, 25]}
stdt = np.array([imStd[i] for i in range(1, 26)]).mean(axis=0)
psnrt = np.array([imPsnr[i] for i in range(1, 26)]).mean(axis=0)
minEpoch = np.argmin(stdt)
print('NumEpochs:', imEpoch[-1])
print('BestEpoch ', imEpoch[minEpoch])
x_avg = 'avg %.3f %.3f' %(stdt[minEpoch], psnrt[minEpoch])
print(x_avg.replace('.',','))
for n in noise:
stdn = np.array([imStd[i] for i in noise[n]]).mean(axis=0)
psnrn = np.array([imPsnr[i] for i in noise[n]]).mean(axis=0)
#print(stdn.shape)
#print(stdn)
#minEpoch = np.argmin(stdn)
x = '%.1f %.3f %.3f' %(n, stdn[minEpoch], psnrn[minEpoch])
print(x.replace('.',','))
#!/bin/bash
#SBATCH -p gpu
#SBATCH --gres gpu:1
#noisyImg=$1
#cleanImg=$2
runTest=/lium/raid01_c/tahon/holography/checkpoints/run-test2020-04-12_12\:14\:29.082341/
for num in 1 2 3 4 5; do
for lambda in 0 1 1p5 2 2p5; do
noisyImg=/lium/raid01_c/tahon/holography/HOLODEEPmat/PATTERN$num/MFH_$lambda/NoisyPhase.mat
#noisyImg=/lium/raid01_c/tahon/holography/HOLODEEPmat/PATTERN$num/MFH_$lambda/run-test2020-04-12_12\:14\:29.082341/run-test2020-04-12_12\:14\:29.082341/NoisyPhase.mat-27000.mat-27000.mat
cleanImg=/lium/raid01_c/tahon/holography/HOLODEEPmat/PATTERN$num/PhaseDATA.mat
echo $noisyImg >> DL-Py6-epoch.res
python main_holo.py --test_noisy_img $noisyImg --test_noisy_key 'NoisyPhase' --test_clean_img $cleanImg --test_clean_key 'Phase' --test_flip False --test_ckpt_index $runTest --params "phase=test, lr=0.001, train_noise=0, nb_layers=4" >> DL-Py6-epoch.res
done
done
test1=/lium/raid01_c/tahon/holography/DATAEVAL/DATA_1_Phase_Type1_2_0.25_1.5_4_50.mat
test2=/lium/raid01_c/tahon/holography/DATAEVAL/DATA_20_Phase_Type4_2_0.25_2.5_4_100.mat
test3=/lium/raid01_c/tahon/holography/DATAEVAL/VibPhaseDATA_masked.mat
keyNoisy='Phaseb'
keyClean='Phase'
echo $test1
python main_holo.py --test_noisy_img $test1 --test_noisy_key $keyNoisy --test_clean_img $test1 --test_clean_key $keyClean --test_flip False --test_ckpt_index $runTest --params "phase=test, lr=0.0001, train_noise=0-1-1.5, nb_layers=4" >> DL-Py6-epoch.res
echo $test2
python main_holo.py --test_noisy_img $test2 --test_noisy_key $keyNoisy --test_clean_img $test2 --test_clean_key $keyClean --test_flip False --test_ckpt_index $runTest --params "phase=test, lr=0.0001, train_noise=0-1-1.5, nb_layers=4" >> DL-Py6-epoch.res
echo $test3
python main_holo.py --test_noisy_img $test3 --test_noisy_key $keyNoisy --test_clean_img $test3 --test_clean_key $keyClean --test_flip False --test_ckpt_index $runTest --params "phase=test, lr=0.0001, train_noise=0-1-1.5, nb_layers=4" >> DL-Py6-epoch.res
@@ -52,11 +52,13 @@ __status__ = "Production"
def extract_sess_name(lp, ln, pt, stride, ps, np):
#example of the call of the function:
#sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type, hparams.stride, hparams.patch_size, hparams.patch_per_image)
return '-'.join(map(str, lp)) + '_' + '-'.join(map(str, ln)) + '_' + pt + '_' + str(stride) + '_' + str(ps) + '_' + str(np)
#return '-'.join(map(str, lp)) + '_' + '-'.join(map(str, ln)) + '_' + pt + '_' + str(stride) + '_' + str(ps) + '_' + str(np)
return '-'.join(map(str, lp)) + '_' + ln + '_' + pt + '_' + str(stride) + '_' + str(ps) + '_' + str(np)
def get_files(path, regexp):
list_files = []
for root, dirs, files in os.walk(path):
#print(root, dirs, files)
for name in files:
#print(name, regexp)
match = re.match(regexp, name)
@@ -66,8 +68,12 @@ def get_files(path, regexp):
return sorted(list_files)
def from_NATURAL(dir_noise, dir_clean, path_only):
select_noisy = sorted(glob(dir_noise + '/*.png'))
select_clean = sorted(glob(dir_clean + '/*.png'))
print(dir_noise, dir_clean)
regExp = r'.*\.png'
#select_noisy = sorted(glob(dir_noise + '/*.png'))
#select_clean = sorted(glob(dir_clean + '/*.png'))
select_noisy = get_files(pathlib.Path(dir_noise), regExp)
select_clean = get_files(pathlib.Path(dir_clean), regExp)
if path_only:#return only the filenames, not the images
return select_clean, select_noisy
@@ -86,7 +92,7 @@ def from_NATURAL(dir_noise, dir_clean, path_only):
def from_HOLODEEP(dir_noise, dir_clean, noise_eval, img_eval, path_only):
pattern = {1: ('0','1'), 2: ('0','2'), 3: ('0','3'), 4:('73', '1'), 5:('100','1')}
nois_pat = [str(n).replace('.','p') for n in noise_eval]
nois_pat = [str(n).replace('.','p') for n in noise_eval.split('-')]
regExp = 'MFH2('
for p in img_eval:
@@ -126,7 +132,7 @@ def from_HOLODEEP(dir_noise, dir_clean, noise_eval, img_eval, path_only):
def from_DATABASE(dir_data, noise_eval, img_eval, flipupdown = False):
select_noisy = []
select_clean = []
nois_pat = [str(n).replace('.','p') for n in noise_eval]
nois_pat = [str(n).replace('.','p') for n in noise_eval.split('-')]
for p in img_eval:
pat = dir_data + 'PATTERN' + str(p) + '/'
for n in nois_pat: