Commit b50c73d4 authored by Marie Tahon

minor changes

parent 1bbc378f
......@@ -103,7 +103,8 @@ class EvalDataset(NoisyBSDSDataset):
super(EvalDataset, self).__init__(image_mode, image_size)
self.training_name = eval_dir + "-".join([str(pattern) for pattern in patterns]) + "/" + noises
#get full images from HOLODEEP with flipupdown=False.
self.clean, self.noisy = from_DATABASE(eval_dir, noises, patterns, False)
self.clean = np.array(self.clean)
......
......@@ -40,12 +40,14 @@ def save_clean_pred_rad(args, exp, clean_pred_rad, noisy, clean, nom_img = "Nois
std = cal_std_phase(clean_pred_rad, clean)
print("\n")
print("image : ", nom_img)
print("epoch : ", epoch)
print("psnr : ", psnr)
print("std : ", std)
print("\n")
with open(os.path.join(save_name , '%s-%d.res' % (nom_img, exp.epoch)), 'w') as f:
print("image : ", nom_img, file=f)
print("epoch : ", epoch, file=f)
print("psnr : ", psnr, file=f)
print("std : ", std, file=f)
......
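Editorial note: the psnr and std values printed above come from metric helpers defined elsewhere in the repository (cal_std_phase appears further down in this diff). A minimal, hedged sketch of what such metrics typically compute for phase images wrapped in [-pi, pi] (the function names here are illustrative, not the repository's):
import numpy as np

def psnr_sketch(pred, ref, data_range=2 * np.pi):
    # peak signal-to-noise ratio in dB, with the phase dynamic range as peak value
    mse = np.mean((pred - ref) ** 2)
    return 10.0 * np.log10(data_range ** 2 / mse)

def phase_std_sketch(pred, ref):
    # standard deviation of the wrapped phase error
    err = np.angle(np.exp(1j * (pred - ref)))
    return np.std(err)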
......@@ -307,8 +307,18 @@ class Experiment(object):
if plot is not None:
plot(self)
s = time.time()
for param_group in self.optimizer.param_groups:
lr = param_group['lr']
for epoch in range(start_epoch, num_epochs):
if epoch < 30:
current_lr = lr
else:
current_lr = lr / 10.
#set learning rate
for param_group in self.optimizer.param_groups:
param_group['lr'] = current_lr
self.stats_manager.init()
#train
for x, d in self.train_loader:
x, d = x.to(self.net.device), d.to(self.net.device)
self.optimizer.zero_grad()
......
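Editorial note: the loop above keeps the base learning rate for the first 30 epochs and then divides it by 10. A minimal sketch of the same schedule using PyTorch's built-in scheduler, assuming optimizer, start_epoch and num_epochs are the experiment's own variables:
from torch.optim.lr_scheduler import MultiStepLR

scheduler = MultiStepLR(optimizer, milestones=[30], gamma=0.1)  # lr -> lr/10 once epoch 30 is reached
for epoch in range(start_epoch, num_epochs):
    ...  # one training epoch (forward pass, loss, backward pass, optimizer.step())
    scheduler.step()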
......@@ -37,14 +37,14 @@ import sys
import re
import pathlib
import numpy as np
import tensorflow as tf
#import tensorflow as tf
from PIL import Image
from scipy.io import loadmat, savemat
from glob import glob
#import ipdb
import math
#import math
import nntools as nt
import torch
......@@ -132,7 +132,7 @@ def from_NATURAL(dir_noise, dir_clean, path_only):
def from_HOLODEEP(dir_noise, dir_clean, noise_eval, img_eval, path_only):
"""@deprecated ?
"""@deprecated ? -> yes if we use only .mat files in input with its structure in terms of PATTERNS
    This method returns the clean and noisy images of the HOLODEEP database
Arguments:
......@@ -150,8 +150,6 @@ def from_HOLODEEP(dir_noise, dir_clean, noise_eval, img_eval, path_only):
regExp += pattern[p][0] + n + '2' + pattern[p][1] + '|'
    regExp = regExp[:-1] + r')_\d.*\.tiff'
#regExp = re.compile(r regExp)
print(regExp)
select_noisy = get_files(pathlib.Path(dir_noise), regExp)
select_clean = get_files(pathlib.Path(dir_clean), regExp)
......@@ -159,13 +157,6 @@ def from_HOLODEEP(dir_noise, dir_clean, noise_eval, img_eval, path_only):
if path_only:#return only the filenames, not the images
return select_clean, select_noisy
#from load_images
#pixel value range 0-255
#if not (isinstance(filelist, list) or isinstance(noisyfilelist,list)):
# exit('Problem with evaluation file list')
#im = Image.open(filelist).convert('L')
#data = np.array(im).reshape(1, im.size[1], im.size[0], 1)
#return data
else: #return the images directly, not only the filenames
data_clean = []
......@@ -204,13 +195,13 @@ def from_DATABASE(dir_data, noise_eval, img_eval, flipupdown = False):
clean = []
for file in select_clean:
print('clean eval data: ', file)
#print('clean eval data: ', file)
im = loadMAT_flip(file, 'Phase', flipupdown)
clean.append(im)
noisy = []
for file in select_noisy:
print('noisy eval data: ', file)
#print('noisy eval data: ', file)
im = loadMAT_flip(file, 'NoisyPhase', flipupdown)
noisy.append(im)
return clean, noisy
......@@ -271,19 +262,16 @@ def phase_to_image(data, name):
data (numpy.array) : The numpy.array to save
name (str) : The saving name
"""
#ipdb.set_trace()
    #normalize the raw phase from [-pi, pi] to [0, 1]
#data = (data - data.min())/ (data.max() - data.min())
#if not (data.min() >= -np.pi) and (data.max() <= np.pi):
# data = np.unwrap(data)
#ipdb.set_trace()
data = wrap_phase(data)
    data = min_max_norm(data) #rescale between 0 and 1
    data = np.clip(data, 0, 1) #clip values below 0 and above 1 (np.clip returns a new array)
print(data.min(), data.max())
#print(data.min(), data.max())
    data = (data * 255).astype('uint8') #format the data as an 8-bit image
from PIL import Image
im = Image.fromarray(data[0,:,:,0])
im.save(name, 'tiff')
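Editorial note: wrap_phase and min_max_norm are helpers defined elsewhere in this module. A minimal sketch of what they are assumed to do (the _sketch names are illustrative only):
import numpy as np

def wrap_phase_sketch(data):
    # wrap an arbitrary phase into [-pi, pi]
    return np.angle(np.exp(1j * data))

def min_max_norm_sketch(data):
    # rescale values linearly to [0, 1]
    return (data - data.min()) / (data.max() - data.min())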
......@@ -406,15 +394,15 @@ def normalize_data(data,phase_type, rdm, phase_augmentation = False):
data_n = np.zeros(shape = newshape)
cpt = 0
for k in range(numPatch):
data_n[0*numPatch + k,:,:,0] = np.cos( data[k,:,:,0])
data_n[1*numPatch + k,:,:,0] = np.sin( data[k,:,:,0])
data_n[2*numPatch + k,:,:,0] = np.cos( np.transpose( data[k,:,:,0]) )
data_n[3*numPatch + k,:,:,0] = np.sin( np.transpose( data[k,:,:,0]) )
data_n[4*numPatch + k,:,:,0] = np.cos( math.pi/4 + data[k,:,:,0])
data_n[5*numPatch + k,:,:,0] = np.sin( math.pi/4 + data[k,:,:,0])
data_n[6*numPatch + k,:,:,0] = np.cos( np.transpose( math.pi/4 + data[k,:,:,0]) )
data_n[7*numPatch + k,:,:,0] = np.sin( np.transpose( math.pi/4 + data[k,:,:,0]) )
print('nb of cos / sin / cos + flipud / sin + flipud: ', numPatch * 8)
data_n[0*numPatch + k,:,:,0] = np.cos( data[k,:,:,0])
data_n[1*numPatch + k,:,:,0] = np.sin( data[k,:,:,0])
data_n[2*numPatch + k,:,:,0] = np.cos( np.transpose( data[k,:,:,0]) )
data_n[3*numPatch + k,:,:,0] = np.sin( np.transpose( data[k,:,:,0]) )
data_n[4*numPatch + k,:,:,0] = np.cos( np.pi/4 + data[k,:,:,0])
data_n[5*numPatch + k,:,:,0] = np.sin( np.pi/4 + data[k,:,:,0])
data_n[6*numPatch + k,:,:,0] = np.cos( np.transpose( np.pi/4 + data[k,:,:,0]) )
data_n[7*numPatch + k,:,:,0] = np.sin( np.transpose( np.pi/4 + data[k,:,:,0]) )
    print('nb of augmented patches (cos/sin, with transpose and +pi/4 variants): ', numPatch * 8)
return data_n
else:
        print('[!] phase type does not exist (phi|cos|sin|two)')
......@@ -442,7 +430,7 @@ class train_data():
else:
rdm = None
clean = normalize_data(np.load(self.filepath).astype(np.float32), self.phase_type, rdm) #normalize the data to [-1, 1]
rdm = np.random.randn(1,2, )
#rdm = np.random.randn(1,2, )
noisy = normalize_data(np.load(self.noisyfilepath).astype(np.float32), self.phase_type, rdm) #normalize the data to [-1, 1]
print(clean.shape)
idx = np.random.permutation(clean.shape[0])
......@@ -478,24 +466,17 @@ def load_train_data(filepath='./data/image_clean_patches_train.npy', noisyfilepa
sys.exit()
print("[*] Loading data...")
clean = np.load(filepath)
noisy = np.load(noisyfilepath)
#if phase_type == 'two':
# rdm = np.random.randint(0, 2, len(filepath))
#else:
# rdm = None
clean = np.load(filepath)
noisy = np.load(noisyfilepath)
#print(xc.shape)
#sys.exit()
#clean = normalize_data(np.load(filepath).astype(np.float32), phase_type, rdm) #normalize the data to -1+1
#noisy = normalize_data(np.load(noisyfilepath).astype(np.float32), phase_type, rdm) #normalize the data to -1+1
#shuffle
#ipdb.set_trace()
idx = np.random.permutation(clean.shape[0])
#np.random.shuffle(self.data)
#print('max / min:', np.max(clean), np.max(noisy), np.min(clean), np.min(noisy))
#clean_p = (clean[idx, :, :, :] - 126) / 126 * np.pi #training data are normalized between -pi and pi
#noisy_p = (noisy[idx, :, :, :] - 126) / 126 * np.pi
#ipdb.set_trace()
print("[*] Load successfully...")
return clean[idx,:,:,:], noisy[idx,:,:,:]
......@@ -522,16 +503,16 @@ def load_eval_data(dir_data, noise_eval, img_eval):
"""
see from_DATABASE
"""
clean, noisy = from_DATABASE(dir_data, noise_eval, img_eval, flipupdown = True)
clean, noisy = from_DATABASE(dir_data, noise_eval, img_eval, flipupdown = False)
#if phase_type == 'two':
# clean_cos = normalize_data(clean.astype(np.float32), 'cos', None)
# clean_sin = normalize_data(clean.astype(np.float32), 'sin', None)
# noisy_cos = normalize_data(noisy.astype(np.float32), 'cos', None)
# noisy_sin = normalize_data(noisy.astype(np.float32), 'sin', None)
# clean_cos = normalize_data(clean.astype(np.float32), 'cos')
# clean_sin = normalize_data(clean.astype(np.float32), 'sin')
# noisy_cos = normalize_data(noisy.astype(np.float32), 'cos')
# noisy_sin = normalize_data(noisy.astype(np.float32), 'sin')
# return clean, noisy, clean_cos, noisy_cos, clean_sin, noisy_sin
#elif phase_type == 'phi':
# clean_phi = normalize_data(clean.astype(np.float32), 'phi', None)
# noisy_phi = normalize_data(clean.astype(np.float32), 'phi', None)
# clean_phi = normalize_data(clean.astype(np.float32), 'phi')
# noisy_phi = normalize_data(clean.astype(np.float32), 'phi')
#clean_n = [x / (2 * np.pi) + 0.5 for x in clean]
#noisy_n = [x / (2 * np.pi) + 0.5 for x in noisy]
return clean, noisy
......@@ -571,7 +552,7 @@ def save_images(filepath, ground_truth, noisy_image=np.array([]), clean_image=np
def save_MAT_images(filepath, values):
    #save a numpy array of values in MATLAB format (so that iterations can be run on predicted images)
print(values.reshape(values.shape[1], values.shape[2]).shape)
print("original size: ", values.reshape(values.shape[1], values.shape[2]).shape)
mdict = {'NoisyPhase': values.reshape(values.shape[1], values.shape[2])}
savemat(filepath, mdict, appendmat = False)
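Editorial note: a hedged usage sketch for reading such a file back with scipy; the file name is hypothetical and appendmat=False matches the savemat call above:
from scipy.io import loadmat
phase = loadmat('NoisyPhase_pred.mat', appendmat=False)['NoisyPhase']  # 2-D phase array saved above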
......@@ -593,7 +574,7 @@ def cal_std_phase(im1, im2):
return dev
<<<<<<< HEAD
def tf_psnr(im1, im2):
'''
this function is deprecated
......@@ -604,8 +585,6 @@ def tf_psnr(im1, im2):
return 10.0 * (tf.log(1 / mse) / tf.log(10.0))
=======
>>>>>>> cfbb377a755bc7b5e2b9ead857e01e5576ec3f67
......