Commit 899923e8 authored by Marie Tahon

add test function for one image at a time

parent d813ab0e
......@@ -35,33 +35,36 @@ __status__ = "Production"
# Default hyperparameters:
hparams = tf.contrib.training.HParams(
noise_src_dir = '/lium/raid01_c/tahon/holography/HOLODEEP/',
clean_src_dir = '/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP/',
#noise_src_dir = '/lium/raid01_c/tahon/holography/HOLODEEP/',
noise_src_dir = '/lium/raid01_c/tahon/holography/NATURAL/',
#clean_src_dir = '/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP/',
clean_src_dir = '/lium/raid01_c/tahon/holography/NOISEFREENATURAL/',
eval_dir = '/lium/raid01_c/tahon/holography/HOLODEEPmat/',
#test_dir = 'lium/raid01_c/tahon/holography/TEST/',
phase = 'train', #train or test phase
#image
isDebug = False, #True, #create only 10 patches
originalsize = (1024,1024), #1024 for matlab database, 128 for holodeep database
phase_type = 'phi', #keep phase between -pi and pi (phi), or convert it to cosine (cos), sine (sin), or both (two)
phase_type = 'two', #keep phase between -pi and pi (phi), or convert it to cosine (cos), sine (sin), or both (two)
#select images for training
train_patterns = [1, 2, 3, 5], #number of images from 1 to 5
train_noise = [0, 1, 1.5],
train_patterns = [1, 2, 3, 4, 5], #number of images from 1 to 5
train_noise = [0, 1, 1.5, 2, 2.5],
#select images for evaluation (during training)
eval_patterns = [4],
eval_patterns = [1, 2, 3, 4, 5],
eval_noise = [0, 1, 1.5, 2, 2.5],
#select images for testing
test_patterns = [5],
test_noise = [0, 1, 1.5],
test_patterns = [1, 2, 3, 4, 5],
test_noise = [0, 1, 1.5, 2, 2.5],
noise_type = 'spkl', #type of noise: speckle or gaussian (spkl|gauss)
sigma = 25, #noise level for gaussian denoising
#Training
batch_size = 64, #128
patch_per_image = 350, # Silvio used 384 for 1024x1024 images
patch_size = 50, #70 for grayscale images, 50 for colour images.
epoch = 200, #2000
lr = 0.0005, # learning rate
patch_per_image = 384, # Silvio used 384 for 1024x1024 images
patch_size = 50, # Silvio used 50.
epoch = 2000, #2000
lr = 0.001, # learning rate
stride = 50, # spatial step for cropping images (value from the initial script: 10)
step = 0, # initial spatial step for cropping
scales = [1] #[1, 0.9, 0.8, 0.7] # scales for data augmentation
......
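With the new settings, training, evaluation and testing all draw from the full grid of five patterns and five noise levels; a quick illustrative check of the resulting selection size (the actual pairing of patterns and noise levels happens in utils.py):
from itertools import product
# 5 patterns x 5 noise levels = 25 (pattern, noise) combinations per split
combos = list(product([1, 2, 3, 4, 5], [0, 1, 1.5, 2, 2.5]))
print(len(combos))  # 25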
......@@ -47,22 +47,16 @@ __status__ = "Production"
#import ipdb
parser = argparse.ArgumentParser(description='')
#parser.add_argument('--epoch', dest='epoch', type=int, default=, help='# of epoch')
#parser.add_argument('--batch_size', dest='batch_size', type=int, default=128, help='# images in batch')
#parser.add_argument('--lr', dest='lr', type=float, default=0.001, help='initial learning rate for adam')
parser.add_argument('--use_gpu', dest='use_gpu', type=int, default=1, help='gpu flag, 1 for GPU and 0 for CPU')
#parser.add_argument('--sigma', dest='sigma', type=int, default=25, help='noise level')
#parser.add_argument('--phase', dest='phase', default='train', help='train or test')
parser.add_argument('--checkpoint_dir', dest='ckpt_dir', default='./checkpoint', help='models are saved here')
parser.add_argument('--sample_dir', dest='sample_dir', default='./sample', help='sample are saved here')
#parser.add_argument('--clean_src_dir', dest='clean_src_dir', default='/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP', help='dir of clean data')
#parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test sample are saved here')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
#parser.add_argument('--eval_set_clean', dest='eval_set_clean', default='NOISEFREEHOLODEEPtest', help='clean dataset for eval in training')
#parser.add_argument('--eval_set_noisy', dest='eval_set_noisy', default='HOLODEEPtest', help='noisy dataset for eval in training')
parser.add_argument('--test_set', dest='test_set', default='BSD68', help='dataset for testing')
parser.add_argument('--save_dir', dest='save_dir', default='./data0/', help='dir of patches')
parser.add_argument('--test_noisy_img', dest='noisy_img', type=str, default='', help='name and directory of the noisy image for testing')
parser.add_argument('--test_clean_img', dest='clean_img', type=str, default='', help='name and directory of the clean image for testing (possibly none)')
parser.add_argument('--test_flip', dest='flip', type=bool, default=False, help='option for upside down flip of noisy (and clean) test image')
parser.add_argument('--test_ckpt_index', dest='ckpt_index', type=str, default='', help='name and directory of the checkpoint that will be restored.')
parser.add_argument('--save_dir', dest='save_dir', default='./data1/', help='dir of patches')
args = parser.parse_args()
#ipdb.set_trace()
hparams.parse(args.params)
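hparams.parse comes from tf.contrib.training.HParams and accepts a comma-separated list of name=value overrides, so any default defined above can be changed from the command line; a minimal sketch (the entry-point script name is an assumption):
# python main.py --use_gpu 1 --params "phase_type=phi,epoch=200,lr=0.0005"
hparams.parse('phase_type=phi,epoch=200,lr=0.0005')  # overrides the defaults in place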
......@@ -74,30 +68,39 @@ def denoiser_train(denoiser, lr):
print('session name: ', sess_name)
train_data= load_train_data(filepath=args.save_dir + 'img_clean_train_' + sess_name + '.npy', noisyfilepath=args.save_dir + 'img_noisy_train_' + sess_name + '.npy', phase_type=hparams.phase_type)
# if memory is limited, comment this line out and uncomment line 99 in model.py
#data = data.astype(np.float32) / 255.0 # normalize the data to 0-1 -> data normalization is done directly in utils.py
#eval_files = glob('./data/test/{}/*.png'.format(args.eval_set))
#eval_files_clean = glob('./data/test/{}/*.tiff'.format(args.eval_set_clean))
#eval_files_noisy = glob('./data/test/{}/*.tiff'.format(args.eval_set_noisy))
#load eval data from HOLODEEP database
#eval_data = from_HOLODEEP(hparams.noise_src_dir, hparams.clean_src_dir, hparams.eval_noise, hparams.eval_patterns, hparams.isDebug)
#load eval data from DATABASE
#eval_data = from_DATABASE(hparams.eval_dir, hparams.eval_noise, hparams.eval_patterns, hparams.isDebug)
#load eval data from DATABASE Matlab
eval_data = load_eval_data(hparams.eval_dir, hparams.eval_noise, hparams.eval_patterns)
#eval_data = load_images(eval_files_clean, eval_files_noisy, hparams.phase_type) # list of array of different size, 4-D, pixel value range is 0-255
#ipdb.set_trace()
print('train data shape:', train_data[0].shape, type(train_data))
print('eval data shape:', eval_data[0][0].shape, type(eval_data))
#ipdb.set_trace()
denoiser.train(train_data, eval_data, batch_size=hparams.batch_size, ckpt_dir=args.ckpt_dir, epoch=hparams.epoch, lr=lr,
sample_dir=args.sample_dir, phase_type=hparams.phase_type)
denoiser.train(train_data, eval_data, batch_size=hparams.batch_size, ckpt_dir=args.ckpt_dir, epoch=hparams.epoch, lr=lr, sample_dir=args.sample_dir, phase_type=hparams.phase_type)
def denoiser_test(denoiser):
test_files = glob('./data/test/{}/*.png'.format(args.test_set))
denoiser.test(test_files, ckpt_dir=args.ckpt_dir, save_dir=args.test_dir)
#noisy = load_test_data(args.noisy_img, key = 'NoisyPhase', flipupdown = args.flip)
noisy = load_test_data(args.noisy_img, key = 'Phaseb', flipupdown = args.flip) # for vibPhase
print('load noisy ref')
if args.clean_img:
print('load clean ref')
clean = load_test_data(args.clean_img, key = 'Phase', flipupdown = args.flip)
else:
clean = noisy
test_files = (clean, noisy)
test_dir, test_name = os.path.split(args.noisy_img)
sess_name = args.ckpt_index.split('/')[-2]
print(sess_name)
save_dir = test_dir + '/' + sess_name + '/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
denoiser.test(test_files, ckpt_index=args.ckpt_index, save_dir=save_dir, save_name=test_name, phase_type=hparams.phase_type)
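Putting the new flags together, a hedged sketch of a one-image test run (script name, data paths and checkpoint name are hypothetical, and main() is assumed to dispatch on hparams.phase). The .meta path must contain the session directory, since sess_name is recovered from its parent folder; also note that argparse's type=bool turns any non-empty string into True, so --test_flip is best left unset unless flipping is wanted:
# python main.py --params "phase=test" \
#   --test_noisy_img ./TEST/img1_NoisyPhase.mat \
#   --test_clean_img ./TEST/img1_Phase.mat \
#   --test_ckpt_index ./checkpoint/run-test2020-01-01/DnCNN-tensorflow-12300.meta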
def main(_):
......
# -*- coding: utf-8 -*-
#
# This file is part of DnCnn4Holo.
#
# DnCnn4Holo is a python script for phase image denoising.
# Home page: https://git-lium.univ-lemans.fr/tahon/dncnn-tensorflow-holography
#
# Adapted from https://github.com/wbhu/DnCNN-tensorflow by Hu Wenbo
#
# DnCnn4Holo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# DnCnn4Holo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DnCnn4Holo. If not, see <http://www.gnu.org/licenses/>.
"""
Copyright 2019-2020 Marie Tahon
:mod:`model.py` defines the neural network and trains it
"""
import time
import tensorflow as tf
from utils import *
......@@ -35,14 +6,6 @@ from hparams import hparams_debug_string
import numpy as np
#import ipdb
__license__ = "LGPL"
__author__ = "Marie Tahon"
__copyright__ = "Copyright 2019-2020 Marie Tahon"
__maintainer__ = "Marie Tahon"
__email__ = "marie.tahon@univ-lemans.fr"
__status__ = "Production"
#__docformat__ = 'reStructuredText'
def _activation(x):
y = tf.nn.relu(x + np.pi)
z = tf.nn.relu(2 * np.pi - y) - np.pi
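For intuition, _activation stacks two ReLUs so the output is a hard clip of the negated input to [-pi, pi]; a numpy check of this identity, assuming the elided end of the function is `return z`:
import numpy as np
x = np.linspace(-8.0, 8.0, 1001)
y = np.maximum(x + np.pi, 0)              # relu(x + pi)
z = np.maximum(2 * np.pi - y, 0) - np.pi  # relu(2*pi - y) - pi
assert np.allclose(z, np.clip(-x, -np.pi, np.pi))  # a hard clip, up to a sign flip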
......@@ -61,7 +24,7 @@ def loss_function(ref, pred, phase_type):
def dncnn(input, is_training=True, output_channels=1):
with tf.variable_scope('block1'):
output = tf.layers.conv2d(input, 64, 3, padding='same', activation=tf.nn.relu) # note: this was tf.nn.relu
for layers in range(2, 4 + 1): #16 + 1
for layers in range(2, 4 + 1): #originally 16 + 1
with tf.variable_scope('block%d' % layers):
output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False)
output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
......@@ -117,13 +80,9 @@ class denoiser(object):
fres.write('clean ref for evaluation max %.2f; min %.2f\n' %(np.max(clean_ref), np.min(clean_ref)))
print('clean ref eval', np.max(clean_ref), np.min(clean_ref))
noisy_ref = test_data[1].copy()
#s =(1, clean_ref.shape[1], clean_ref.shape[2], clean_ref.shape[3])
#test_data_noisy = test_data[1]
for idx in range(len(clean_ref)):
clean_ref_rad = clean_ref[idx]
noisy_ref_rad = noisy_ref[idx]
#clean_ref_01 = normalize_data(clean_ref_rad, 'phi', None) #reference residual Y
#noisy_ref_01 = normalize_data(noisy_ref_rad, 'phi', None) #reference noisy X
if phase_type == 'phi':
clean_pred_rad, psnr_summary_phi = self.sess.run([self.Y, summary_merged], feed_dict={self.Y_: clean_ref_rad, self.X:noisy_ref_rad, self.is_training: False})
summary_writer.add_summary(psnr_summary_phi, iter_num)
......@@ -139,44 +98,29 @@ class denoiser(object):
clean_pred_sin, psnr_summary_sin = self.sess.run([self.Y, summary_merged], feed_dict={self.Y_: clean_ref_sin, self.X:noisy_ref_sin, self.is_training: False})
summary_writer.add_summary(psnr_summary_cos, iter_num)
summary_writer.add_summary(psnr_summary_sin, iter_num)
#output_pred_cos = norm_to_sincos(clean_pred_cos)
#output_pred_sin = norm_to_sincos(clean_pred_sin)
clean_pred_rad = np.angle(clean_pred_cos + clean_pred_sin * 1j) #phase obtained between -pi and pi
#print(clean_image.shape)
#old version where noisy X is generated by adding gaussian noise to Y_
#output_clean_image, noisy_image, psnr_summary = self.sess.run([self.Y, self.X, summary_merged], feed_dict={self.Y_: clean_image, self.is_training: False})
#print(idx)
fres.write('std phase between clean and noisy ref: %.2f\n' %(cal_std_phase(clean_ref_rad, noisy_ref_rad)))
print('clean ref radian:', np.max(clean_ref_rad), np.min(clean_ref_rad))
fres.write('clean ref radians: %.2f %.2f\n' %(np.max(clean_ref_rad), np.min(clean_ref_rad)))
#print('clean ref 0-1:', np.max(clean_ref_01), np.min(clean_ref_01))
#fres.write('clean ref 0-1: %.2f %.2f\n' %(np.max(clean_ref_01), np.min(clean_ref_01)))
#print('clean pred 0-1:', np.max(clean_pred_01), np.min(clean_pred_01))
#fres.write('clean pred 0-1: %.2f %.2f\n' %(np.max(clean_pred_01), np.min(clean_pred_01)))
#print('noisy ref phase', np.max(output_ref), np.min(output_ref))
#fres.write('noisy ref phase: %.2f %.2f\n' %(np.max(output_ref), np.min(output_ref)))
print('clean pred rad', np.max(clean_pred_rad), np.min(clean_pred_rad))
#fres.write('clean pred rad residual: %.2f %.2f\n' %(np.max(clean_pred_rad_res), np.min(clean_pred_rad_res)))
#print('clean pred rad residual', np.max(clean_pred_rad_res), np.min(clean_pred_rad_res))
fres.write('clean pred rad: %.2f %.2f\n' %(np.max(clean_pred_rad), np.min(clean_pred_rad)))
#groundtruth = np.clip(255 * clean_ref_phi, 0, 255).astype('uint8')
#noisyimage = np.clip(255* noisy_ref_phi, 0, 255).astype('uint8')
#outputimage = np.clip(255 * clean_pred_phi, 0, 255).astype('uint8')
# calculate PSNR
#psnr = cal_psnr(groundtruth, outputimage)
psnr = cal_psnr(clean_ref_rad, clean_pred_rad)
# calculate std on phase
#dev = cal_std_phase(groundtruth, outputimage)
dev = cal_std_phase(clean_ref_rad, clean_pred_rad)
fres.write("img%d PSNR: %.2f, STD: %.2f\n" % (idx + 1, psnr, dev))
print("img%d PSNR: %.2f, STD: %.2f" % (idx + 1, psnr, dev))
#print("img%d STD: %.2f" % (idx + 1, dev))
psnr_sum += psnr
dev_sum += dev
if idx < 10:
save_images(os.path.join(sample_dir, 'test%d_%d.tiff' % (idx + 1, iter_num)), clean_ref_rad, noisy_ref_rad, clean_pred_rad)
#save_images(os.path.join(sample_dir, 'test%d_%d.png' % (idx + 1, iter_num)), groundtruth, noisyimage, outputimage)
avg_psnr = psnr_sum / len(clean_ref)
avg_dev = dev_sum / len(clean_ref)
fres.write("--- Test ---- Average PSNR %.2f ---\n" % avg_psnr)
......@@ -195,6 +139,7 @@ class denoiser(object):
return output_clean_image, noisy_image, psnr
def train(self, data, eval_data, batch_size, ckpt_dir, epoch, lr, sample_dir, phase_type, eval_every_epoch=5):
phase_augmentation = True
sess_name = 'run-test' + str(datetime.now()).replace(' ', '_')
ckpt_dir = ckpt_dir + '/' + sess_name + '/'
sample_dir = sample_dir + '/' + sess_name + '/'
......@@ -210,17 +155,19 @@ class denoiser(object):
#clean_ref = data[0]
#noisy_ref = data[1]
numPatch = data[0].shape[0]
if (phase_type == 'phi') | (phase_type == 'cos') | (phase_type == 'sin'):
#if phase_type is phi, do nothing to the values
#if phase_type is cos or sin, apply the cos or sin function to all clean and noisy values
#if phase_type is two and phase_augmentation is False, apply the cos or sin function to half of the values, selected randomly
if (phase_type == 'phi') | (phase_type == 'cos') | (phase_type == 'sin') | (phase_augmentation == True):
rdm = None
elif phase_type == 'two':
rdm = np.random.randint(0, 2, numPatch)
else:
#print('phase type does not exist')
sys.exit('phase type does not exist')
data_noisy = normalize_data(data[1], phase_type, rdm)
data_clean = normalize_data(data[0], phase_type, rdm)
data_noisy = normalize_data(data[1], phase_type, rdm, phase_augmentation)
data_clean = normalize_data(data[0], phase_type, rdm, phase_augmentation)
numPatch = data_noisy.shape[0]
fres.write('[*] Normalize training data with phase type as %s \n' %(phase_type))
print('[*] Normalize training data with phase type as ', phase_type)
#ipdb.set_trace()
......@@ -259,7 +206,12 @@ class denoiser(object):
#ipdb.set_trace()
self.evaluate(iter_num, eval_data, sample_dir=sample_dir, summary_merged=summary_psnr, summary_writer=writer, sess_name=sess_name, phase_type=phase_type) # eval_data value range is 0-255
for epoch in range(start_epoch, epoch):
#np.random.shuffle(data) no shuffle for the moment
#np.random.shuffle(data) #no shuffle for the moment
#shuffle target and source synchronously with random permutation at each epoch.
ind = np.random.permutation(numPatch)
data_clean = data_clean[ind, :,:,:]
data_noisy = data_noisy[ind, :,:,:]
for batch_id in range(start_step, numBatch):
fres = open(sample_dir + '/res', 'a')
batch_images_clean = data_clean[batch_id * batch_size:(batch_id + 1) * batch_size, :, :, :]
......@@ -290,82 +242,93 @@ class denoiser(object):
os.path.join(checkpoint_dir, model_name),
global_step=iter_num)
def load_from_checkpoint(self, ckpt_meta):
print("[*] Reading checkpoint...")
ckpt_dir, ckpt_model = os.path.split(ckpt_meta)
ckpt, _ = os.path.splitext(ckpt_model)
#saver = tf.train.Saver()
#load from a specific checkpoint
global_step = int(ckpt_model.split('-')[-1].split('.')[0])
print('global step is: ', global_step)
saver = tf.train.import_meta_graph(ckpt_meta)
print(ckpt_dir, ckpt)
#restore expects the full checkpoint prefix (directory + model name)
saver.restore(self.sess, os.path.join(ckpt_dir, ckpt))
#saver.restore(self.sess, tf.train.latest_checkpoint(ckpt_dir))
return True, global_step
def load(self, checkpoint_dir):
#else load from the latest checkpoint
print("[*] Reading checkpoint...")
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
full_path = tf.train.latest_checkpoint(checkpoint_dir)
print(full_path)
global_step = int(full_path.split('/')[-1].split('-')[-1])
saver.restore(self.sess, full_path)
return True, global_step
else:
return False, 0
# def test(self, test_files, ckpt_dir, save_dir):
# """Test DnCNN"""
# # init variables
# tf.initialize_all_variables().run()
# assert len(test_files) != 0, 'No testing data!'
# load_model_status, global_step = self.load(ckpt_dir)
# assert load_model_status == True, '[!] Load weights FAILED...'
# print(" [*] Load weights SUCCESS...")
# psnr_sum = 0
# print("[*] " + 'noise level: ' + str(self.sigma) + " start testing...")
# for idx in range(len(test_files)):
# #load_images does not normalize the images by 255.0, so it must be done here
# clean_ref = load_images(test_files[idx])
# output_clean_image, noisy_image = self.sess.run([self.Y, self.X],
# saver = tf.train.Saver()
# checkpoint_dir = ckpt_dir
# if not os.path.exists(checkpoint_dir):
# os.makedirs(checkpoint_dir)
# print("[*] Saving model...")
# saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=iter_num)
def test(self, test_data, ckpt_index, save_dir, save_name, phase_type):
"""Test DnCNN"""
#check inputs
#ckpt, _ = os.path.splitext(ckpt_index)
assert os.path.exists(ckpt_index), 'No checkpoint here!'
N = len(test_data)
assert N != 0, 'No testing data!'
clean_ref_rad = test_data[0].copy() #between -pi and pi
noisy_ref_rad = test_data[1].copy()
def test(self, test_files, ckpt_dir, save_dir):
"""Test DnCNN"""
ftest = open(save_dir + '/test.res', 'a')
ftest.write('clean ref for test max %.2f; min %.2f\n' %(np.max(clean_ref_rad), np.min(clean_ref_rad)))
#make summary
summary_psnr = tf.summary.scalar('eva_psnr', self.eva_psnr)
summary_writer = tf.summary.FileWriter('./logs/' + save_dir, self.sess.graph)
# init variables
tf.initialize_all_variables().run()
assert len(test_files) != 0, 'No testing data!'
tf.global_variables_initializer().run()
#tf.initialize_all_variables().run()
ckpt_dir, ckpt_model = os.path.split(ckpt_index)
load_model_status, global_step = self.load(ckpt_dir)
#load_model_status, global_step = self.load_from_checkpoint(ckpt_index)
assert load_model_status == True, '[!] Load weights FAILED...'
print(" [*] Load weights SUCCESS...")
psnr_sum = 0
print("[*] " + 'noise level: ' + str(self.sigma) + " start testing...")
clean_ref = test_files[0]
noisy_ref = test_files[1]
for idx in range(len(test_files)):
#load_images does not normalize the images by 255.0, so it must be done here
clean_ref_phi = normalize_data(clean_ref[idx], 'phi', None)
noisy_ref_phi = normalize_data(noisy_ref[idx], 'phi', None)
if phase_type == 'phi':
clean_pred_phi, psnr_summary_phi = self.sess.run([self.Y, summary_merged], feed_dict={self.Y_: clean_ref_phi, self.X:noisy_ref_phi, self.is_training: False})
summary_writer.add_summary(psnr_summary_phi, iter_num)
else:
clean_ref_cos = normalize_data(clean_ref[idx], 'cos', None)
noisy_ref_cos = normalize_data(noisy_ref[idx], 'cos', None)
clean_pred_cos, psnr_summary_cos = self.sess.run([self.Y, summary_merged], feed_dict={self.Y_: clean_ref_cos, self.X:noisy_ref_cos, self.is_training: False})
clean_ref_sin = normalize_data(clean_ref[idx], 'sin', None)
noisy_ref_sin = normalize_data(noisy_ref[idx], 'sin', None)
clean_pred_sin, psnr_summary_sin = self.sess.run([self.Y, summary_merged], feed_dict={self.Y_: clean_ref_sin, self.X:noisy_ref_sin, self.is_training: False})
summary_writer.add_summary(psnr_summary_cos, iter_num)
summary_writer.add_summary(psnr_summary_sin, iter_num)
clean_pred_phi = np.angle(clean_pred_cos + clean_pred_sin * 1j) #phase obtained between -pi and pi
#print(clean_image.shape)
#old version where noisy X is generated by adding gaussian noise to Y_
#output_clean_image, noisy_image, psnr_summary = self.sess.run([self.Y, self.X, summary_merged], feed_dict={self.Y_: clean_image, self.is_training: False})
#print(idx)
#groundtruth = np.clip(255 * clean_ref_phi, 0, 255).astype('uint8')
#noisyimage = np.clip(255* noisy_ref_phi, 0, 255).astype('uint8')
#outputimage = np.clip(255 * clean_pred_phi, 0, 255).astype('uint8')
# calculate PSNR
#psnr = cal_psnr(groundtruth, outputimage)
psnr = cal_psnr(clean_ref_phi, clean_pred_phi)
# calculate std on phase
#dev = cal_std_phase(groundtruth, outputimage)
dev = cal_std_phase(clean_ref_phi, clean_pred_phi)
print("img%d PSNR: %.2f, STD: %.2f" % (idx + 1, psnr, dev))
#print("img%d STD: %.2f" % (idx + 1, dev))
psnr_sum += psnr
ftest.write(" [*] Load weights SUCCESS...\n")
ftest.write('global step is %s\n' %(global_step))
#print('clean ref test', np.max(clean_ref_rad), np.min(clean_ref_rad))
#print(clean_ref_rad.shape, noisy_ref_rad.shape)
if phase_type == 'phi':
clean_pred_rad, psnr_summary_phi = self.sess.run([self.Y, summary_psnr], feed_dict={self.Y_: clean_ref_rad, self.X:noisy_ref_rad, self.is_training: False})
summary_writer.add_summary(psnr_summary_phi, global_step)
else:
clean_ref_cos = normalize_data(clean_ref_rad, 'cos', None)
noisy_ref_cos = normalize_data(noisy_ref_rad, 'cos', None)
clean_pred_cos, psnr_summary_cos = self.sess.run([self.Y, summary_psnr], feed_dict={self.Y_: clean_ref_cos, self.X:noisy_ref_cos, self.is_training: False})
clean_ref_sin = normalize_data(clean_ref_rad, 'sin', None)
noisy_ref_sin = normalize_data(noisy_ref_rad, 'sin', None)
clean_pred_sin, psnr_summary_sin = self.sess.run([self.Y, summary_psnr], feed_dict={self.Y_: clean_ref_sin, self.X:noisy_ref_sin, self.is_training: False})
summary_writer.add_summary(psnr_summary_cos, global_step)
summary_writer.add_summary(psnr_summary_sin, global_step)
clean_pred_rad = np.angle(clean_pred_cos + clean_pred_sin * 1j) #phase obtained between -pi and pi
ftest.write('std phase between clean and noisy ref: %.2f\n' %(cal_std_phase(clean_ref_rad, noisy_ref_rad)))
print('clean ref radian:', np.max(clean_ref_rad), np.min(clean_ref_rad))
ftest.write('clean ref radians: %.2f %.2f\n' %(np.max(clean_ref_rad), np.min(clean_ref_rad)))
print('clean pred rad', np.max(clean_pred_rad), np.min(clean_pred_rad))
ftest.write('clean pred rad: %.2f %.2f\n' %(np.max(clean_pred_rad), np.min(clean_pred_rad)))
# calculate PSNR
psnr = cal_psnr(clean_ref_rad, clean_pred_rad)
# calculate std on phase
dev = cal_std_phase(clean_ref_rad, clean_pred_rad)
ftest.write("test img PSNR: %.2f, STD: %.2f\n" % (psnr, dev))
print("test img %s PSNR: %.2f, STD: %.2f" % (save_name, psnr, dev))
save_images(os.path.join(save_dir, '%s.tiff' % (save_name + '-' + str(global_step))), clean_pred_rad)
save_MAT_images(os.path.join(save_dir, '%s.mat' %(save_name + '-' + str(global_step))), clean_pred_rad)
ftest.close()
......@@ -35,7 +35,7 @@ import pathlib
import numpy as np
import tensorflow as tf
from PIL import Image
from scipy.io import loadmat
from scipy.io import loadmat, savemat
#import ipdb
......@@ -105,7 +105,7 @@ def from_HOLODEEP(dir_noise, dir_clean, noise_eval, img_eval, path_only):
return data_clean, data_noisy
# return select_noisy, select_clean
def from_DATABASE(dir_data, noise_eval, img_eval):
def from_DATABASE(dir_data, noise_eval, img_eval, flipupdown = False):
select_noisy = []
select_clean = []
nois_pat = [str(n).replace('.','p') for n in noise_eval]
......@@ -117,24 +117,40 @@ def from_DATABASE(dir_data, noise_eval, img_eval):
#if isDebug: print('-->', len(select_noisy), len(select_clean))
s = loadmat(select_clean[0])['Phase'].shape
#clean = np.zeros(shape= (len(select_clean), s[1], s[0], 1))
clean = []
for file in select_clean:
print('clean eval data: ', file)
im = loadmat(file)['Phase']
#ipdb.set_trace()
#print('-->', np.max(im), np.min(im))
clean.append(np.array(im).reshape(1, im.shape[1], im.shape[0], 1))
im = loadMAT_flip(file, 'Phase', flipupdown)
clean.append(im)
noisy = []
#noisy = np.zeros(shape = (len(select_noisy), s[1], s[0], 1))
for file in select_noisy:
print('noisy eval data: ', file)
im = loadmat(file)['NoisyPhase']
noisy.append(np.array(im).reshape(1, im.shape[1], im.shape[0], 1))
im = loadMAT_flip(file, 'NoisyPhase', flipupdown)
noisy.append(im)
return clean, noisy
def loadMAT_flip(file, key, flipupdown):
s = loadmat(file)
if key in s:
im = np.array(s[key])
else:
print('Existing keys are: ', s.keys())
sys.exit('Key error when loading matlab file')
if flipupdown:
im = np.flipud(im)
return im.reshape(1, im.shape[1], im.shape[0], 1)
def loadIM_flip(file, key, flipupdown):
im = np.array(Image.open(file).convert('L'))
im = (im * np.pi / 128.0) - np.pi #map 8-bit gray levels to radians
if flipupdown:
im = np.flipud(im)
print(im.min(), im.max())
return im.reshape(1, im.shape[1], im.shape[0], 1)
def wrap_phase(x):
return (x + np.pi) % (2 * np.pi) - np.pi
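wrap_phase folds any angle back into [-pi, pi); for example:
import numpy as np
print(wrap_phase(np.pi + 0.5))  # -> 0.5 - pi (about -2.642)
print(wrap_phase(-3 * np.pi))   # -> -pi
print(wrap_phase(0.3))          # unchanged: 0.3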
......@@ -205,7 +221,7 @@ def sincos_to_norm(X):
return (X + 1) / 2
def normalize_data(data,phase_type, rdm):
def normalize_data(data, phase_type, rdm, phase_augmentation = False):
#normalize the data according to phase_type: phi keeps radians unchanged, cos/sin map values into [-1, 1]
if phase_type == 'phi':
return data
......@@ -213,7 +229,7 @@ def normalize_data(data,phase_type, rdm):
return np.cos( data)
elif phase_type == 'sin':
return np.sin(data)
elif phase_type == 'two':
elif (phase_type == 'two') & (phase_augmentation == False):
data_n = np.zeros(shape = data.shape)
cpt = 0
for k, r in enumerate(rdm):
......@@ -224,6 +240,18 @@ def normalize_data(data,phase_type, rdm):
data_n[k,:,:,:] = np.sin( data[k,:,:,:])
print('Nb of cos files :', cpt)
return data_n
elif (phase_type == 'two') & (phase_augmentation == True):
numPatch = data.shape[0]
newshape = (numPatch * 4, data.shape[1], data.shape[2], data.shape[3])
data_n = np.zeros(shape = newshape)
cpt = 0
for k in range(numPatch):
data_n[k,:,:,0] = np.cos( data[k,:,:,0])
data_n[numPatch + k,:,:,0] = np.sin( data[k,:,:,0])
data_n[2*numPatch + k,:,:,0] = np.cos( np.transpose( data[k,:,:,0]) )
data_n[3*numPatch + k,:,:,0] = np.sin( np.transpose( data[k,:,:,0]) )
print('nb of cos / sin / cos + transpose / sin + transpose: ', numPatch)
return data_n
else:
print('[!] phase type does not exist (phi|cos|sin|two)')
sys.exit()
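With phase_type 'two' and phase_augmentation enabled, the returned array holds four variants of every patch (cos, sin, and the cos and sin of the transposed patch), quadrupling the patch count; a shape-check sketch:
import numpy as np
patches = np.random.uniform(-np.pi, np.pi, size=(8, 50, 50, 1))
out = normalize_data(patches, 'two', None, phase_augmentation=True)
print(out.shape)  # (32, 50, 50, 1): cos, sin, cos of transpose, sin of transpose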
......@@ -250,6 +278,7 @@ class train_data():
else:
rdm = None
clean = normalize_data(np.load(self.filepath).astype(np.float32), self.phase_type, rdm) #normalize the data to -1+1
#reuse the same rdm so that clean and noisy patches get identical cos/sin assignments
noisy = normalize_data(np.load(self.noisyfilepath).astype(np.float32), self.phase_type, rdm) #normalize the data to -1+1
print(clean.shape)
idx = np.random.permutation(clean.shape[0])
......@@ -301,8 +330,16 @@ def load_train_data(filepath='./data/image_clean_patches_train.npy', noisyfilepa
#return train_data(filepath=filepath, noisyfilepath=noisyfilepath, phase_type=phase_type)
def load_test_data(file, key, flipupdown = False):
_, ext = os.path.splitext(file)
if ext == '.mat':
return loadMAT_flip(file, key, flipupdown)
else:
return loadIM_flip(file, key, flipupdown)
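load_test_data dispatches on the file extension: .mat files go through loadMAT_flip with the given key, anything else through loadIM_flip, which converts an 8-bit grayscale image to radians. A usage sketch (file names are hypothetical):
noisy = load_test_data('./TEST/img1.mat', key='NoisyPhase')               # (1, H, W, 1), radians
noisy_png = load_test_data('./TEST/img1.png', key=None, flipupdown=True)  # key is ignored for images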
def load_eval_data(dir_data, noise_eval, img_eval):
clean, noisy = from_DATABASE(dir_data, noise_eval, img_eval)
clean, noisy = from_DATABASE(dir_data, noise_eval, img_eval, flipupdown = True)
#if phase_type == 'two':
# clean_cos = normalize_data(clean.astype(np.float32), 'cos', None)
# clean_sin = normalize_data(clean.astype(np.float32), 'sin', None)
......@@ -334,19 +371,25 @@ def load_images(filelist, noisyfilelist, phase_type):
return data_clean, data_noisy
def save_images(filepath, ground_truth, noisy_image=None, clean_image=None):
def save_images(filepath, ground_truth, noisy_image=np.array([]), clean_image=np.array([])):
# assert the pixel value range is 0-255
#ground_truth = np.squeeze(ground_truth)
#noisy_image = np.squeeze(noisy_image)
#clean_image = np.squeeze(clean_image)
if not ground_truth.any():
cat_image = ground_truth
elif noisy_image.size == 0 and clean_image.size == 0:
cat_image = ground_truth
else:
cat_image = np.concatenate([ground_truth, noisy_image, clean_image], axis=1)
#im = Image.fromarray(cat_image.astype('uint8')).convert('L')
#im.save(filepath, 'png')
phase_to_image(cat_image, filepath)
def save_MAT_images(filepath, values):
#save a numpy array of values in matlab format (so that iterations can be performed on predicted images)
print(values.reshape(values.shape[1], values.shape[2]).shape)
mdict = {'NoisyPhase': values.reshape(values.shape[1], values.shape[2])}
savemat(filepath, mdict, appendmat = False)
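Since save_MAT_images stores the prediction under the 'NoisyPhase' key, a denoised image can be reloaded and fed through the network again; a round-trip sketch (the path is hypothetical):
from scipy.io import loadmat
pred = loadmat('./TEST/run-test2020/img1-12300.mat', appendmat=False)['NoisyPhase']  # (H, W) array in radians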
def cal_psnr(im1, im2):
# a