Commit 68e5494e authored by Marie Tahon

remove old files and update new ones

parent 89444a0c
FROM tensorflow/tensorflow:1.4.1-gpu
RUN pip install \
numpy
VOLUME /workspace
WORKDIR /workspace
CMD python main.py --phase test --checkpoint_dir ./checkpoint_demo --test_set Set12
import argparse
import glob
from PIL import Image
import PIL
import random
from utils import *
# the pixel value range of the training data is 0-255 (uint8)
# macro
DATA_AUG_TIMES = 1 # transform a sample to a different sample for DATA_AUG_TIMES times
parser = argparse.ArgumentParser(description='')
parser.add_argument('--src_dir', dest='src_dir', default='./data/Train400', help='dir of data')
parser.add_argument('--save_dir', dest='save_dir', default='./data', help='dir of patches')
parser.add_argument('--patch_size', dest='pat_size', type=int, default=40, help='patch size')
parser.add_argument('--stride', dest='stride', type=int, default=10, help='stride')
parser.add_argument('--step', dest='step', type=int, default=0, help='step')
parser.add_argument('--batch_size', dest='bat_size', type=int, default=128, help='batch size')
# check output arguments
parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
args = parser.parse_args()
def generate_patches(isDebug=False):
    global DATA_AUG_TIMES
    count = 0
    filepaths = glob.glob(args.src_dir + '/*.png')
    if isDebug:
        filepaths = filepaths[:10]
    print "number of training data %d" % len(filepaths)
    scales = [1, 0.9, 0.8, 0.7]
    # calculate the number of patches
    for i in xrange(len(filepaths)):
        img = Image.open(filepaths[i]).convert('L')  # convert RGB to gray
        for s in xrange(len(scales)):
            newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
            img_s = img.resize(newsize, resample=PIL.Image.BICUBIC)  # do not change the original img
            im_h, im_w = img_s.size
            for x in range(0 + args.step, (im_h - args.pat_size), args.stride):
                for y in range(0 + args.step, (im_w - args.pat_size), args.stride):
                    count += 1
    origin_patch_num = count * DATA_AUG_TIMES
    if origin_patch_num % args.bat_size != 0:
        numPatches = (origin_patch_num / args.bat_size + 1) * args.bat_size
    else:
        numPatches = origin_patch_num
    print "total patches = %d , batch size = %d, total batches = %d" % \
        (numPatches, args.bat_size, numPatches / args.bat_size)
    # data matrix 4-D
    inputs = np.zeros((numPatches, args.pat_size, args.pat_size, 1), dtype="uint8")
    count = 0
    # generate patches
    for i in xrange(len(filepaths)):
        img = Image.open(filepaths[i]).convert('L')
        for s in xrange(len(scales)):
            newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
            # print newsize
            img_s = img.resize(newsize, resample=PIL.Image.BICUBIC)
            img_s = np.reshape(np.array(img_s, dtype="uint8"),
                               (img_s.size[0], img_s.size[1], 1))  # extend one dimension
            for j in xrange(DATA_AUG_TIMES):
                im_h, im_w, _ = img_s.shape
                for x in range(0 + args.step, im_h - args.pat_size, args.stride):
                    for y in range(0 + args.step, im_w - args.pat_size, args.stride):
                        inputs[count, :, :, :] = data_augmentation(img_s[x:x + args.pat_size, y:y + args.pat_size, :], \
                                                                   random.randint(0, 7))
                        count += 1
    # pad the batch
    if count < numPatches:
        to_pad = numPatches - count
        inputs[-to_pad:, :, :, :] = inputs[:to_pad, :, :, :]
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    np.save(os.path.join(args.save_dir, "img_clean_pats"), inputs)
    print "size of inputs tensor = " + str(inputs.shape)

if __name__ == '__main__':
    generate_patches()
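Note: data_augmentation above is imported from utils, which is not part of this commit. For reference, a minimal sketch of the 8-mode flip/rotate augmentation it is assumed to implement (names and behaviour assumed, not confirmed by this diff):

# Assumed sketch of utils.data_augmentation (not confirmed by this diff):
# the 8 modes cover the dihedral group of flips and 90-degree rotations.
import numpy as np

def data_augmentation(image, mode):
    if mode == 0:
        return image                              # original patch
    elif mode == 1:
        return np.flipud(image)                   # flip up/down
    elif mode == 2:
        return np.rot90(image)                    # rotate 90 degrees
    elif mode == 3:
        return np.flipud(np.rot90(image))         # rotate 90 then flip
    elif mode == 4:
        return np.rot90(image, k=2)               # rotate 180 degrees
    elif mode == 5:
        return np.flipud(np.rot90(image, k=2))    # rotate 180 then flip
    elif mode == 6:
        return np.rot90(image, k=3)               # rotate 270 degrees
    else:
        return np.flipud(np.rot90(image, k=3))    # rotate 270 then flip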
import argparse
#import re
#import glob
import pathlib
import os, sys
@@ -8,6 +9,8 @@ import PIL
import numpy as np
from utils import *
from hparams import hparams, hparams_debug_string
from scipy.io import loadmat
# the pixel value range of the training data is 0-255 (uint8)
@@ -15,36 +18,42 @@ from hparams import hparams, hparams_debug_string
DATA_AUG_TIMES = 1 # transform a sample to a different sample for DATA_AUG_TIMES times
parser = argparse.ArgumentParser(description='')
parser.add_argument('--clean_src_dir', dest='clean_src_dir', default='/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP', help='dir of clean data')
parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
parser.add_argument('--save_dir', dest='save_dir', default='./data', help='dir of patches')
parser.add_argument('--patch_size', dest='pat_size', type=int, default=50, help='patch size')#50 for RGB and 70 for grayscale
parser.add_argument('--stride', dest='stride', type=int, default=10, help='stride')
parser.add_argument('--step', dest='step', type=int, default=0, help='step')
#parser.add_argument('--batch_size', dest='bat_size', type=int, default=128, help='batch size')
#parser.add_argument('--clean_src_dir', dest='clean_src_dir', default='/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP', help='dir of clean data')
#parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
#parser.add_argument('--train_image', dest='train_patterns', default=hparams.train_patterns, help='patterns of images for training')
#parser.add_argument('--train_noise', dest='train_noise', default=hparams.train_noise, help='noise values for training images')
parser.add_argument('--save_dir', dest='save_dir', default='./data1', help='dir of patches')
#parser.add_argument('--patch_size', dest='pat_size', type=int, default=hparams.patch_size, help='patch size')#50 for RGB and 70 for grayscale
#parser.add_argument('--stride', dest='stride', type=int, default=hparams.stride, help='stride')
#parser.add_argument('--step', dest='step', type=int, default=hparams.step, help='step')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
# check output arguments
parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
#parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
#parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
args = parser.parse_args()
#print(args.params['patch_size'])
hparams.parse(args.params)
#def from_DATABASE():
#hparams.parse(args["--hparams"])
def generate_patches(isDebug=True):
    #global DATA_AUG_TIMES = 1
    hparams.patch_size = args.pat_size
    #hparams.patch_size = args.pat_size
    print(hparams_debug_string())
    src_path = pathlib.Path(args.clean_src_dir)
    noisy_path = pathlib.Path(args.noise_src_dir)
    filepaths = [x for x in src_path.glob('*.tiff')]
    noisyfilepaths = [x for x in noisy_path.glob('*.tiff')]
    #filepaths = [x for x in src_path.glob('*.tiff')] #('*.mat')
    #noisyfilepaths = [x for x in noisy_path.glob('*.tiff')] #('*.mat')
    filepaths, noisyfilepaths = from_HOLODEEP(hparams.noise_src_dir, hparams.clean_src_dir, hparams.train_noise, hparams.train_patterns, path_only=True)
    if isDebug:
        filepaths = filepaths[:10]
        noisyfilepaths = noisyfilepaths[:10]
    print(filepaths)
    print(noisyfilepaths)
    #print(filepaths)
    #print(noisyfilepaths)
    #exit()
    print("number of training data %d" % len(filepaths))
    print("number of clean training data {0} and noisy {1}".format(len(filepaths), len(noisyfilepaths)))
    scales = hparams.scales # old version [1, 0.9, 0.8, 0.7]
    # calculate the number of patches
@@ -53,45 +62,52 @@ def generate_patches(isDebug=True):
    #for s in range(len(scales)):
    #    new_size = (int(origin_size[0] * scales[s]), int(origin_size[1] * scales[s]))
    count = 0
    count1 = 0
    d = dict()
    for i in range(len(filepaths)):
        img = Image.open(filepaths[i]).convert('L') # convert RGB to gray, no need to convert: grayscale
        #print(filepaths[i])
        #img = Image.open(filepaths[i]).convert('L') # convert RGB to gray, no need to convert: grayscale
        for s in range(len(scales)):
            newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
            newsize = (int(hparams.originalsize[0] * scales[s]), int(hparams.originalsize[1] * scales[s]))
            d[newsize] = d.get(newsize, 0) + 1
            img_s = img.resize(newsize, resample=PIL.Image.BICUBIC) # do not change the original img
            print(img_s.size, img.size, newsize)
            im_h, im_w = img_s.size
            for x in range(0 + hparams.step, (im_h - hparams.patch_size), hparams.stride):
                for y in range(0 + hparams.step, (im_w - hparams.patch_size), hparams.stride):
                    count += 1
            print("Count scales = ", count)
            # no need to actually rebuild the image here, its dimensions are enough
            #img_s = img.resize(newsize, resample=PIL.Image.BICUBIC) # do not change the original img
            #print(img.size, newsize)
            im_h, im_w = newsize # img_s.size
            #for x in range(0 + hparams.step, (im_h - hparams.patch_size), hparams.stride):
            #    for y in range(0 + hparams.step, (im_w - hparams.patch_size), hparams.stride):
            #        count += 1
            count1 += int((im_h - hparams.patch_size) / hparams.stride) * int((im_w - hparams.patch_size) / hparams.stride)
    count = len(scales) * len(filepaths) * hparams.patch_per_image
    print("total number of patches for all training images = ", count1, " and used patches = ", count)
    origin_patch_num = count * DATA_AUG_TIMES
    for size in d:
        print("%i images in size %s" % (d[size], size))
    if origin_patch_num % hparams.batch_size != 0:
        numPatches = (origin_patch_num / hparams.batch_size + 1) * hparams.batch_size
        #if origin_patch_num > hparams.batch_size:
        numPatches = int((origin_patch_num / hparams.batch_size + 1) * hparams.batch_size)
    else:
        numPatches = origin_patch_num
    numPatches = int(numPatches)
    #numPatches = int(numPatches)
    print("total patches = %d , batch size = %d, total batches = %d" % (numPatches, hparams.batch_size, numPatches / hparams.batch_size))
    # data matrix 4-D
    inputs = np.zeros((numPatches, hparams.patch_size, hparams.patch_size, 1), dtype="uint8")
    noisyinputs = np.zeros((numPatches, hparams.patch_size, hparams.patch_size, 1), dtype="uint8")
    print("Shape of input (including noisy) : ", inputs.shape)
    count = 0
    cpt_img_scale = 0
    # generate patches
    for i in range(len(filepaths)):
        img = Image.open(filepaths[i]).convert('L')
        #print(filepaths[i])
        img = Image.open(filepaths[i]).convert('L') # import matlab image: img = loadmat(filepaths[i]) ? TO CHECK
        noisyimg = Image.open(noisyfilepaths[i]).convert('L') # convert RGB to gray, no need to convert: grayscale
        #img = filepaths[i]
        #noisyimg = noisyfilepaths[i]
        for s in range(len(scales)):
            newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
            # print newsize
@@ -102,27 +118,56 @@ def generate_patches(isDebug=True):
            for j in range(DATA_AUG_TIMES):
                im_h, im_w, _ = img_s.shape
                cpt = 0
                numPatch_per_img = int((im_h - hparams.patch_size) / hparams.stride) * int((im_w - hparams.patch_size) / hparams.stride)
                inputs_img_scale = np.zeros((numPatch_per_img, hparams.patch_size, hparams.patch_size, 1), dtype='uint8')
                noisyinputs_img_scale = np.zeros((numPatch_per_img, hparams.patch_size, hparams.patch_size, 1), dtype='uint8')
                for x in range(0 + hparams.step, im_h - hparams.patch_size, hparams.stride):
                    for y in range(0 + hparams.step, im_w - hparams.patch_size, hparams.stride):
                        # depending on the mode: 0 normal, 1 flip up/down, 2 rotate 90, ...
                        #inputs[count, :, :, :] = data_augmentation(img_s[x:x + args.pat_size, y:y + args.pat_size, :], np.random.randint(0, 7))
                        # so here we only keep the plain version of the image
                        inputs[count, :, :, :] = img_s[x:x + hparams.patch_size, y:y + hparams.patch_size, :]
                        noisyinputs[count, :, :, :] = noisyimg_s[x:x + hparams.patch_size, y:y + hparams.patch_size, :]
                        count += 1
                        print("Count patches = ", count)
                        # pad the batch
                        inputs_img_scale[cpt, :, :, :] = img_s[x:x + hparams.patch_size, y:y + hparams.patch_size, :]
                        noisyinputs_img_scale[cpt, :, :, :] = noisyimg_s[x:x + hparams.patch_size, y:y + hparams.patch_size, :]
                        cpt += 1
                # shuffle the patches of an image the same way for noisy and clean images
                perm_idx = np.random.permutation(cpt)[:hparams.patch_per_image]
                #print("perm_idx", perm_idx.shape, perm_idx)
                inputs[cpt_img_scale: cpt_img_scale + hparams.patch_per_image, :, :, :] = inputs_img_scale[perm_idx, :, :, :]
                noisyinputs[cpt_img_scale:cpt_img_scale + hparams.patch_per_image, :, :, :] = noisyinputs_img_scale[perm_idx, :, :, :]
                cpt_img_scale += 1
        del img, noisyimg
    #if hparams.phase_type == 'phi':
    #    rdm = None
    #else:
    #    rdm = np.random.randint(0, 2, inputs.shape[0])
    #inputs_n = normalize_data(inputs, hparams.phase_type, rdm)
    #noisyinputs_n = normalize_data(noisyinputs, hparams.phase_type, rdm)
    #print("Count total nb of patches = ", cpt_img_scale * hparams.patch_per_image)
    # pad the batch (complete the last batch with the first inputs)
    if count < numPatches:
        to_pad = numPatches - count
        print('Nb of patches added for padding to batch size: ', to_pad)
        inputs[-to_pad:, :, :, :] = inputs[:to_pad, :, :, :]
        noisyinputs[-to_pad:, :, :, :] = noisyinputs[:to_pad, :, :, :]
    # check input images
    #import matplotlib.pyplot as plt
    #plt.imsave('test0_clean', inputs[0, :, :, 0], cmap='Greys')
    #plt.imsave('test0_noisy', noisyinputs[0, :, :, 0], cmap='Greys')
    print('shape of inputs: ', inputs.shape)
    print('amplitude of inputs: ', np.max(inputs), np.min(inputs))
    sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type)
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    np.save(os.path.join(args.save_dir, "img_clean_pats"), inputs)
    np.save(os.path.join(args.save_dir, "img_noisy_pats"), noisyinputs)
    np.save(os.path.join(args.save_dir, "img_clean_train_" + sess_name), inputs)
    np.save(os.path.join(args.save_dir, "img_noisy_train_" + sess_name), noisyinputs)
    print("size of inputs tensor = " + str(inputs.shape))

if __name__ == '__main__':
    generate_patches(True)
    generate_patches(hparams.isDebug)
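For clarity, the batch-size rounding used above can be checked in isolation; a worked example with illustrative values (not taken from this diff):

# Illustrative check of the padding arithmetic (values are examples only):
batch_size = 64
nb_final_patch = 350 * 20   # e.g. patch_per_image * number of images
if nb_final_patch % batch_size != 0:
    numPatches = int(nb_final_patch / batch_size + 1) * batch_size
else:
    numPatches = nb_final_patch
print(numPatches)  # 7040 -> the last 40 slots are filled with the first patches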
import argparse
#import re
#import glob
import pathlib
import os, sys
from PIL import Image
import PIL
#import random
import numpy as np
from utils import *
from hparams import hparams, hparams_debug_string
from scipy.io import loadmat
# the pixel value range of the training data is 0-255 (uint8)
# macro
#DATA_AUG_TIMES = 1 # transform a sample to a different sample for DATA_AUG_TIMES times
parser = argparse.ArgumentParser(description='')
#parser.add_argument('--clean_src_dir', dest='clean_src_dir', default='/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP', help='dir of clean data')
#parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
#parser.add_argument('--train_image', dest='train_patterns', default=hparams.train_patterns, help='patterns of images for training')
#parser.add_argument('--train_noise', dest='train_noise', default=hparams.train_noise, help='noise values for training images')
parser.add_argument('--save_dir', dest='save_dir', default='./data1', help='dir of patches')
#parser.add_argument('--patch_size', dest='pat_size', type=int, default=hparams.patch_size, help='patch size')#50 for RGB and 70 for grayscale
#parser.add_argument('--stride', dest='stride', type=int, default=hparams.stride, help='stride')
#parser.add_argument('--step', dest='step', type=int, default=hparams.step, help='step')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
# check output arguments
#parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
#parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
args = parser.parse_args()
#print(args.params['patch_size'])
hparams.parse(args.params)
import ipdb
def generate_patches(isDebug=True):
    #global DATA_AUG_TIMES = 1
    #hparams.patch_size = args.pat_size
    print(hparams_debug_string())
    #filepaths = [x for x in src_path.glob('*.tiff')] #('*.mat')
    #noisyfilepaths = [x for x in noisy_path.glob('*.tiff')] #('*.mat')
    cleanmat, noisymat = from_DATABASE(hparams.eval_dir, hparams.train_noise, hparams.train_patterns)
    #ipdb.set_trace()
    print("number of clean training data {0} and noisy {1}".format(len(cleanmat), len(noisymat)))
    scales = 1 # and it stays fixed! hparams.scales # old version [1, 0.9, 0.8, 0.7]
    nb_image = len(cleanmat)
    nb_patch_per_image = int((hparams.originalsize[0] - hparams.patch_size) / hparams.stride + 1) * int((hparams.originalsize[1] - hparams.patch_size) / hparams.stride + 1) # (1024 - 50)/50 + 1 = 20 -> 20*20 = 400 patches per img
    nb_origin_patch = nb_patch_per_image * nb_image
    nb_final_patch = hparams.patch_per_image * nb_image
    print("total number of patches for all training images = ", nb_origin_patch, " and used patches = ", nb_final_patch)
    if nb_final_patch % hparams.batch_size != 0:
        #if origin_patch_num > hparams.batch_size:
        numPatches = int(nb_final_patch / hparams.batch_size + 1) * hparams.batch_size
    else:
        numPatches = nb_final_patch
    print("total patches = %d , batch size = %d, total batches = %d" % (numPatches, hparams.batch_size, numPatches / hparams.batch_size))
    # data matrix 4-D
    cleaninputs = np.zeros((numPatches, hparams.patch_size, hparams.patch_size, 1))
    noisyinputs = np.zeros((numPatches, hparams.patch_size, hparams.patch_size, 1))
    print("Shape of input (including noisy) : ", cleaninputs.shape)
    #ipdb.set_trace()
    cpt_img_scale = 0
    # generate patches
    for i in range(nb_image):
        cleanimg = cleanmat[i] ## import matlab image: img = loadmat(filepaths[i]) ? TO CHECK
        #noisyimg = Image.open(noisyfilepaths[i]).convert('L') # convert RGB to gray, no need to convert: grayscale
        noisyimg = noisymat[i] ## import matlab image: img = loadmat(filepaths[i]) ? TO CHECK
        #for s in range(len(scales)):
        #    newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
        #    # print newsize
        #    img_s = img.resize(newsize, resample=PIL.Image.BICUBIC)
        #    img_s = np.reshape(np.array(img_s, dtype="uint8"), (img_s.size[0], img_s.size[1], 1)) # extend one dimension
        #    noisyimg_s = noisyimg.resize(newsize, resample=PIL.Image.BICUBIC)
        #    noisyimg_s = np.reshape(np.array(noisyimg_s, dtype="uint8"), (noisyimg_s.size[0], noisyimg_s.size[1], 1)) # extend one dimension
        #    for j in range(DATA_AUG_TIMES):
        #        im_h, im_w, _ = img_s.shape
        cpt = 0
        inputs_img_scale = np.zeros((nb_patch_per_image, hparams.patch_size, hparams.patch_size, 1))
        noisyinputs_img_scale = np.zeros((nb_patch_per_image, hparams.patch_size, hparams.patch_size, 1))
        for x in range(0 + hparams.step, hparams.originalsize[0] - hparams.patch_size, hparams.stride):
            for y in range(0 + hparams.step, hparams.originalsize[1] - hparams.patch_size, hparams.stride):
                #print(x, y)
                # depending on the mode: 0 normal, 1 flip up/down, 2 rotate 90, ...
                #inputs[count, :, :, :] = data_augmentation(img_s[x:x + args.pat_size, y:y + args.pat_size, :], np.random.randint(0, 7))
                # so here we only keep the plain version of the image
                inputs_img_scale[cpt, :, :, :] = cleanimg[:, x:x + hparams.patch_size, y:y + hparams.patch_size, :]
                noisyinputs_img_scale[cpt, :, :, :] = noisyimg[:, x:x + hparams.patch_size, y:y + hparams.patch_size, :]
                cpt += 1
        # shuffle the patches of an image the same way for noisy and clean images
        perm_idx = np.random.permutation(cpt)[:hparams.patch_per_image]
        #print("perm_idx", perm_idx.shape, perm_idx)
        cleaninputs[cpt_img_scale: cpt_img_scale + hparams.patch_per_image, :, :, :] = inputs_img_scale[perm_idx, :, :, :]
        noisyinputs[cpt_img_scale:cpt_img_scale + hparams.patch_per_image, :, :, :] = noisyinputs_img_scale[perm_idx, :, :, :]
        cpt_img_scale += hparams.patch_per_image
    #del img, noisyimg
    #if hparams.phase_type == 'phi':
    #    rdm = None
    #else:
    #    rdm = np.random.randint(0, 2, inputs.shape[0])
    #inputs_n = normalize_data(inputs, hparams.phase_type, rdm)
    #noisyinputs_n = normalize_data(noisyinputs, hparams.phase_type, rdm)
    #ipdb.set_trace()
    #print("Count total nb of patches = ", cpt_img_scale * hparams.patch_per_image)
    # pad the batch (complete the last batch with the first inputs)
    if nb_final_patch < numPatches:
        to_pad = numPatches - nb_final_patch
        print('Nb of patches added for padding to batch size: ', to_pad)
        cleaninputs[-to_pad:, :, :, :] = cleaninputs[:to_pad, :, :, :]
        noisyinputs[-to_pad:, :, :, :] = noisyinputs[:to_pad, :, :, :]
    # check input images
    #import matplotlib.pyplot as plt
    #plt.imsave('test0_clean', inputs[0, :, :, 0], cmap='Greys')
    #plt.imsave('test0_noisy', noisyinputs[0, :, :, 0], cmap='Greys')
    print('shape of inputs: ', cleaninputs.shape)
    print('amplitude of inputs: ', np.max(cleaninputs), np.min(cleaninputs))
    sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type)
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    np.save(os.path.join(args.save_dir, "img_clean_train_" + sess_name), cleaninputs)
    np.save(os.path.join(args.save_dir, "img_noisy_train_" + sess_name), noisyinputs)
    print("size of inputs tensor = " + str(cleaninputs.shape))

if __name__ == '__main__':
    generate_patches(hparams.isDebug)
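With the default hparams below (originalsize 1024x1024, patch_size 50, stride 50), the per-image patch count computed above works out as in the comment; a quick sketch to make the arithmetic explicit:

# Patch-count arithmetic for the default hparams (sketch):
originalsize, patch_size, stride = (1024, 1024), 50, 50
per_axis = int((originalsize[0] - patch_size) / stride + 1)  # (1024 - 50)/50 + 1 = 20
nb_patch_per_image = per_axis * per_axis                     # 20 * 20 = 400 candidate patches
print(nb_patch_per_image)  # 400, of which patch_per_image = 350 are randomly kept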
@@ -2,14 +2,34 @@ import tensorflow as tf
# Default hyperparameters:
hparams = tf.contrib.training.HParams(
    noise_src_dir = '/lium/raid01_c/tahon/holography/HOLODEEP/',
    clean_src_dir = '/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP/',
    eval_dir = '/lium/raid01_c/tahon/holography/HOLODEEPmat/',
    phase = 'train', # train or test phase
    # image
    isDebug = False, #True, # create only 10 patches
    originalsize = (1024, 1024), # 1024 for matlab database, 128 for holodeep database
    phase_type = 'phi', # keep phase between -pi and pi (phi), convert into cosine (cos) or sine (sin)
    # select images for training
    train_patterns = [1, 2, 3, 5], # image patterns numbered from 1 to 5
    train_noise = [0, 1, 1.5],
    # select images for evaluation (during training)
    eval_patterns = [4],
    eval_noise = [0, 1, 1.5, 2, 2.5],
    # select images for testing
    test_patterns = [5],
    test_noise = [0, 1, 1.5],
    noise_type = 'spkl', # type of noise: speckle or gaussian (spkl|gauss)
    sigma = 25, # noise level for gaussian denoising
    # Training
    batch_size = 128,
    batch_size = 64, #128
    patch_per_image = 350, # Silvio used 384 for 1024*1024 images
    patch_size = 50, # 70 for grayscale images, 50 for colour images
    epoch = 2000,
    stride = 1, # spatial step for cropping images, value from initial script: 10
    epoch = 200, #2000
    lr = 0.0005, # learning rate
    stride = 50, # spatial step for cropping images, value from initial script: 10
    step = 0, # initial spatial step for cropping
    scales = [1] #[1, 0.9, 0.8, 0.7] # scales for data augmentation
)
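These defaults can be overridden at run time through the --params flag handled by hparams.parse(args.params) in generate_patches.py; tf.contrib.training.HParams.parse takes a comma-separated list of name=value pairs. A usage sketch (the override values are examples, not from this commit):

# Example override from the command line (values illustrative):
#   python generate_patches.py --params "batch_size=128,stride=25,epoch=500"
# which inside the script amounts to:
hparams.parse("batch_size=128,stride=25,epoch=500")
print(hparams.batch_size, hparams.stride, hparams.epoch)  # 128 25 500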
import argparse
from glob import glob
import tensorflow as tf
from model import denoiser
from utils import *
parser = argparse.ArgumentParser(description='')
parser.add_argument('--epoch', dest='epoch', type=int, default=50, help='# of epoch')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=128, help='# images in batch')
parser.add_argument('--lr', dest='lr', type=float, default=0.001, help='initial learning rate for adam')
parser.add_argument('--use_gpu', dest='use_gpu', type=int, default=1, help='gpu flag, 1 for GPU and 0 for CPU')
parser.add_argument('--sigma', dest='sigma', type=int, default=25, help='noise level')
parser.add_argument('--phase', dest='phase', default='train', help='train or test')
parser.add_argument('--checkpoint_dir', dest='ckpt_dir', default='./checkpoint', help='models are saved here')
parser.add_argument('--sample_dir', dest='sample_dir', default='./sample', help='samples are saved here')
parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test samples are saved here')
parser.add_argument('--eval_set', dest='eval_set', default='Set12', help='dataset for eval in training')
parser.add_argument('--test_set', dest='test_set', default='BSD68', help='dataset for testing')
args = parser.parse_args()
def denoiser_train(denoiser, lr):
    with load_data(filepath='./data/img_clean_pats.npy') as data:
        # if memory is limited, comment this line and uncomment line 99 in model.py
        data = data.astype(np.float32) / 255.0 # normalize the data to 0-1
        eval_files = glob('./data/test/{}/*.png'.format(args.eval_set))
        eval_data = load_images(eval_files) # list of arrays of different sizes, 4-D, pixel value range is 0-255
        denoiser.train(data, eval_data, batch_size=args.batch_size, ckpt_dir=args.ckpt_dir, epoch=args.epoch, lr=lr,
                       sample_dir=args.sample_dir)
def denoiser_test(denoiser):
    test_files = glob('./data/test/{}/*.png'.format(args.test_set))
    denoiser.test(test_files, ckpt_dir=args.ckpt_dir, save_dir=args.test_dir)
def main(_):
    if not os.path.exists(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)
    lr = args.lr * np.ones([args.epoch])
    lr[30:] = lr[0] / 10.0 # divide the learning rate by 10 after epoch 30
    if args.use_gpu:
        # added to control the gpu memory
        print("GPU\n")
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            model = denoiser(sess, sigma=args.sigma)
            if args.phase == 'train':
                denoiser_train(model, lr=lr)
            elif args.phase == 'test':
                denoiser_test(model)
            else:
                print('[!]Unknown phase')
                exit(0)
    else:
        print("CPU\n")
        with tf.Session() as sess:
            model = denoiser(sess, sigma=args.sigma)
            if args.phase == 'train':
                denoiser_train(model, lr=lr)
            elif args.phase == 'test':
                denoiser_test(model)
            else:
                print('[!]Unknown phase')
                exit(0)

if __name__ == '__main__':
    tf.app.run()
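load_data is imported from utils, which is not shown in this commit; a minimal sketch of the context manager it is assumed to provide (names and behaviour assumed, the real helper may differ):

# Assumed sketch of utils.load_data (not confirmed by this diff):
import contextlib
import numpy as np

@contextlib.contextmanager
def load_data(filepath='./data/img_clean_pats.npy'):
    data = np.load(filepath)   # (numPatches, patch_size, patch_size, 1) uint8 tensor
    np.random.shuffle(data)    # shuffle along the patch axis once per run
    yield data                 # array goes out of scope when the with-block exits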
@@ -7,29 +7,55 @@ from hparams import hparams, hparams_debug_string
from model import denoiser
from utils import *
#import ipdb
parser = argparse.ArgumentParser(description='')
parser.add_argument('--epoch', dest='epoch', type=int, default=50, help='# of epoch')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=128, help='# images in batch')
parser.add_argument('--lr', dest='lr', type=float, default=0.001, help='initial learning rate for adam')
#parser.add_argument('--epoch', dest='epoch', type=int, default=, help='# of epoch')
#parser.add_argument('--batch_size', dest='batch_size', type=int, default=128, help='# images in batch')
#parser.add_argument('--lr', dest='lr', type=float, default=0.001, help='initial learning rate for adam')
parser.add_argument('--use_gpu', dest='use_gpu', type=int, default=1, help='gpu flag, 1 for GPU and 0 for CPU')
parser.add_argument('--sigma', dest='sigma', type=int, default=25, help='noise level')
parser.add_argument('--phase', dest='phase', default='train', help='train or test')
#parser.add_argument('--sigma', dest='sigma', type=int, default=25, help='noise level')
#parser.add_argument('--phase', dest='phase', default='train', help='train or test')
parser.add_argument('--checkpoint_dir', dest='ckpt_dir', default='./checkpoint', help='models are saved here')
parser.add_argument('--sample_dir', dest='sample_dir', default='./sample', help='samples are saved here')
#parser.add_argument('--clean_src_dir', dest='clean_src_dir', default='/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP', help='dir of clean data')
#parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
parser.add_argument('--test_dir', dest='test_dir', default='./test', help='test samples are saved here')
parser.add_argument('--eval_set', dest='eval_set', default='Set12', help='dataset for eval in training')