Commit 89444a0c authored by Marie Tahon

add noisy image treatment in preprocess + try to include noisy images in main.py

parent 78d872ce
@@ -6,15 +6,11 @@ from PIL import Image
import PIL
#import random
import numpy as np
print('1--')
from utils import *
print('1--')
from hparams import hparams, hparams_debug_string
print('3--')
# training data pixel values are in the range 0-255 (uint8)
print('--')
# macro
DATA_AUG_TIMES = 1 # transform a sample to a different sample for DATA_AUG_TIMES times
@@ -22,7 +18,7 @@ parser = argparse.ArgumentParser(description='')
parser.add_argument('--clean_src_dir', dest='clean_src_dir', default='/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP', help='dir of clean data')
parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
parser.add_argument('--save_dir', dest='save_dir', default='./data', help='dir of patches')
parser.add_argument('--patch_size', dest='pat_size', type=int, default=70, help='patch size')  # changed from 40 to 70, why?
parser.add_argument('--patch_size', dest='pat_size', type=int, default=50, help='patch size')  # 50 for RGB, 70 for grayscale
parser.add_argument('--stride', dest='stride', type=int, default=10, help='stride')
parser.add_argument('--step', dest='step', type=int, default=0, help='step')
#parser.add_argument('--batch_size', dest='bat_size', type=int, default=128, help='batch size')
@@ -30,13 +26,14 @@ parser.add_argument('--step', dest='step', type=int, default=0, help='step')
parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
#parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
args = parser.parse_args()
print('--')
#hparams.parse(args["--hparams"])
def generate_patches(isDebug=True):
#global DATA_AUG_TIMES = 1
hparams.patch_size = args.pat_size
print(hparams_debug_string())
count = 0
src_path = pathlib.Path(args.clean_src_dir)
noisy_path = pathlib.Path(args.noise_src_dir)
filepaths = [x for x in src_path.glob('*.tiff')]
@@ -46,10 +43,9 @@ def generate_patches(isDebug=True):
noisyfilepaths = noisyfilepaths[:10]
print(filepaths)
print(noisyfilepaths)
exit()
#exit()
print("number of training data %d" % len(filepaths))
print('--')
scales = [1, 0.9, 0.8, 0.7]
scales = hparams.scales #old version [1, 0.9, 0.8, 0.7]
# calculate the number of patches
#we assume that all images have the same size
@@ -58,18 +54,20 @@ def generate_patches(isDebug=True):
# new_size = (int(origin_size[0] * scales[s]), int(origin_size[1] * scales[s]))
count = 0
d = dict()
for i in range(len(filepaths)):
img = Image.open(filepaths[i]) #.convert('L') # convert RGB to gray, no need to convert: grayscale
img = Image.open(filepaths[i]).convert('L') # convert RGB to grayscale
for s in range(len(scales)):
newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
d[newsize]=d.get(newsize,0)+1
img_s = img.resize(newsize, resample=PIL.Image.BICUBIC) # do not change the original img
print(img_s.size, img.size, newsize)
im_h, im_w = img_s.size
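# note: PIL's Image.size is (width, height), so im_h/im_w are effectively swapped here (harmless for square images)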
for x in range(0 + args.step, (im_h - hparams.patch_size), args.stride):
for y in range(0 + args.step, (im_w - hparams.patch_size), args.stride):
for x in range(0 + hparams.step, (im_h - hparams.patch_size), hparams.stride):
for y in range(0 + hparams.step, (im_w - hparams.patch_size), hparams.stride):
count += 1
print("Count scales = ", count)
origin_patch_num = count * DATA_AUG_TIMES
for size in d:
@@ -85,40 +83,46 @@ def generate_patches(isDebug=True):
# data matrix 4-D
inputs = np.zeros((numPatches, hparams.patch_size, hparams.patch_size, 1), dtype="uint8")
print("Shape of input : ", inputs.shape)
noisyinputs = np.zeros((numPatches, hparams.patch_size, hparams.patch_size, 1), dtype="uint8")
print("Shape of input (including noisy) : ", inputs.shape)
count = 0
# generate patches
for i in range(len(filepaths)):
img = Image.open(filepaths[i]).convert('L')
noisyimg = Image.open(noisyfilepaths[i]).convert('L') # convert RGB to gray, no need to convert: grayscale
for s in range(len(scales)):
newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
# print newsize
img_s = img.resize(newsize, resample=PIL.Image.BICUBIC)
img_s = np.reshape(np.array(img_s, dtype="uint8"),
(img_s.size[0], img_s.size[1], 1)) # extend one dimension
img_s = np.reshape(np.array(img_s, dtype="uint8"), (img_s.size[0], img_s.size[1], 1)) # extend one dimension
noisyimg_s = noisyimg.resize(newsize, resample=PIL.Image.BICUBIC)
noisyimg_s = np.reshape(np.array(noisyimg_s, dtype="uint8"), (noisyimg_s.size[0], noisyimg_s.size[1], 1)) # extend one dimension
for j in range(DATA_AUG_TIMES):
im_h, im_w, _ = img_s.shape
for x in range(0 + args.step, im_h - args.pat_size, args.stride):
for y in range(0 + args.step, im_w - args.pat_size, args.stride):
for x in range(0 + hparams.step, im_h - hparams.patch_size, hparams.stride):
for y in range(0 + hparams.step, im_w - hparams.patch_size, hparams.stride):
# depending on the mode: 0 normal, 1 flip up/down, 2 rotate 90, ...
#inputs[count, :, :, :] = data_augmentation(img_s[x:x + args.pat_size, y:y + args.pat_size, :], np.random.randint(0, 7))
# so here we only keep the unaugmented version of the image
inputs[count, :, :, :] = img_s[x:x + args.pat_size, y:y + args.pat_size, :]
inputs[count, :, :, :] = img_s[x:x + hparams.patch_size, y:y + hparams.patch_size, :]
noisyinputs[count, :, :, :] = noisyimg_s[x: x + hparams.patch_size, y:y + hparams.patch_size, :]
count += 1
print("Count = ", count)
print("Count patches = ", count)
# pad the batch
if count < numPatches:
to_pad = numPatches - count
inputs[-to_pad:, :, :, :] = inputs[:to_pad, :, :, :]
noisyinputs[-to_pad:, :, :, :] = noisyinputs[:to_pad, :, :, :]
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
np.save(os.path.join(args.save_dir, "img_clean_pats"), inputs)
np.save(os.path.join(args.save_dir, "img_noisy_pats"), noisyinputs)
print("size of inputs tensor = " + str(inputs.shape))
if __name__ == '__main__':
generate_patches()
generate_patches(True)
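
The correctness of the new pipeline hinges on one invariant: the clean image and its noisy counterpart are cropped at the same scale with the same (x, y) offsets, so inputs[k] and noisyinputs[k] always cover the same region. A minimal sketch of that pairing; paired_patches is a hypothetical helper, not part of the repository:

import numpy as np

def paired_patches(clean, noisy, patch_size, stride, step=0):
    """Yield aligned (clean, noisy) patch pairs from two same-sized arrays."""
    assert clean.shape == noisy.shape
    im_h, im_w = clean.shape[:2]
    for x in range(step, im_h - patch_size, stride):
        for y in range(step, im_w - patch_size, stride):
            yield (clean[x:x + patch_size, y:y + patch_size],
                   noisy[x:x + patch_size, y:y + patch_size])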
hparams.py
@@ -7,11 +7,11 @@ hparams = tf.contrib.training.HParams(
#Training
batch_size = 128,
patch_size = 70, #70 for grayscale images 50 for colour images.
nepochs = 2000,
stride = 10, # spatial step for cropping images
patch_size = 50, #70 for grayscale images 50 for colour images.
epoch = 2000,
stride = 1, # spatial step for cropping images values from initial script 10
step = 0, # initial spatial step for cropping
scales = [1] #[1, 0.9, 0.8, 0.7] # scale for data augmentation
)
def hparams_debug_string():
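
For reference, tf.contrib.training.HParams (TF 1.x) lets these defaults be overridden from a comma-separated string, which is what the commented-out hparams.parse(...) call in the preprocessing script hints at; a small usage sketch:

from hparams import hparams

# override selected fields without editing hparams.py
hparams.parse('batch_size=64,stride=10')
print(hparams.batch_size)   # 64
print(hparams.values())     # dict of all current hyper-parameter values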
main.py
@@ -3,6 +3,7 @@ from glob import glob
import tensorflow as tf
from hparams import hparams, hparams_debug_string
from model import denoiser
from utils import *
@@ -22,12 +23,12 @@ args = parser.parse_args()
def denoiser_train(denoiser, lr):
with load_data(filepath='./data/img_clean_pats.npy') as data:
with load_data(filepath='./data/img_clean_pats.npy', noisyfilepath='./data/img_noisy_pats.npy') as data:
# if memory is limited, comment this line and uncomment line 99 in model.py
data = data.astype(np.float32) / 255.0 # normalize the data to 0-1
#data = data.astype(np.float32) / 255.0 # normalize the data to 0-1 -> data normalization is done directly in utils.py
eval_files = glob('./data/test/{}/*.png'.format(args.eval_set))
eval_data = load_images(eval_files) # list of array of different size, 4-D, pixel value range is 0-255
denoiser.train(data, eval_data, batch_size=args.batch_size, ckpt_dir=args.ckpt_dir, epoch=args.epoch, lr=lr,
denoiser.train(data, eval_data, batch_size=hparams.batch_size, ckpt_dir=args.ckpt_dir, epoch=hparams.epoch, lr=lr,
sample_dir=args.sample_dir)
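
Note that load_data(...).__enter__ now returns a (clean, noisy) tuple (see utils.py below), while denoiser_train still binds the result to a single data variable and passes it straight to denoiser.train. A sketch of the unpacking this implies; how train should consume the pair is left open in this commit, so the tuple argument is an assumption:

def denoiser_train(denoiser, lr):
    with load_data(filepath='./data/img_clean_pats.npy',
                   noisyfilepath='./data/img_noisy_pats.npy') as (clean, noisy):
        eval_files = glob('./data/test/{}/*.png'.format(args.eval_set))
        eval_data = load_images(eval_files)  # pixel value range is 0-255
        denoiser.train((clean, noisy), eval_data, batch_size=hparams.batch_size,
                       ckpt_dir=args.ckpt_dir, epoch=hparams.epoch, lr=lr,
                       sample_dir=args.sample_dir)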
model.py
import time
import tensorflow as tf
from utils import *
def dncnn(input, is_training=True, output_channels=1):
with tf.variable_scope('block1'):
output = tf.layers.conv2d(input, 64, 3, padding='same', activation=tf.nn.relu)
for layers in xrange(2, 16 + 1):
for layers in range(2, 16 + 1):
with tf.variable_scope('block%d' % layers):
output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False)
output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
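
The hunk cuts the function off after the middle blocks. In the upstream DnCNN-tensorflow implementation this code builds on, dncnn() ends with a final convolution that predicts the noise residual, which is then subtracted from the input; a sketch of that tail:

    # final block: predict the noise residual and remove it from the input
    with tf.variable_scope('block17'):
        output = tf.layers.conv2d(output, output_channels, 3, padding='same')
    return input - output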
@@ -21,10 +21,10 @@ class denoiser(object):
self.input_c_dim = input_c_dim
self.sigma = sigma
# build model
self.Y_ = tf.placeholder(tf.float32, [None, None, None, self.input_c_dim],
name='clean_image')
self.Y_ = tf.placeholder(tf.float32, [None, None, None, self.input_c_dim], name='clean_image')# tf.placeholder(dtype, shape= .., name = ..)
self.is_training = tf.placeholder(tf.bool, name='is_training')
self.X = self.Y_ + tf.random_normal(shape=tf.shape(self.Y_), stddev=self.sigma / 255.0) # noisy images
self.X = tf.placeholder(tf.float32, [None, None, None, self.input_c_dim], name = 'noisy_image') # new placeholder for real noisy images, no longer created by Gaussian noise addition
#self.X = self.Y_ + tf.random_normal(shape=tf.shape(self.Y_), stddev=self.sigma / 255.0) # create noisy images with gaussian noise addition
# self.X = self.Y_ + tf.truncated_normal(shape=tf.shape(self.Y_), stddev=self.sigma / 255.0) # noisy images
self.Y = dncnn(self.X, is_training=self.is_training)
self.loss = (1.0 / batch_size) * tf.nn.l2_loss(self.Y_ - self.Y)
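
Since self.X is now a placeholder rather than a node derived from self.Y_, every sess.run that evaluates the network must feed matching noisy patches explicitly, or it will fail with an unfed-placeholder error. A sketch, where clean_batch and noisy_batch are hypothetical aligned arrays (the learning-rate placeholder used by train_op, if defined, must be fed too):

_, loss = self.sess.run(
    [self.train_op, self.loss],
    feed_dict={self.Y_: clean_batch,   # clean targets, float32 in [0, 1]
               self.X: noisy_batch,    # aligned real noisy inputs
               self.is_training: True})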
@@ -42,7 +42,7 @@ class denoiser(object):
# assert test_data value range is 0-255
print("[*] Evaluating...")
psnr_sum = 0
for idx in xrange(len(test_data)):
for idx in range(len(test_data)):
clean_image = test_data[idx].astype(np.float32) / 255.0
output_clean_image, noisy_image, psnr_summary = self.sess.run(
[self.Y, self.X, summary_merged],
@@ -92,9 +92,9 @@ class denoiser(object):
start_time = time.time()
self.evaluate(iter_num, eval_data, sample_dir=sample_dir, summary_merged=summary_psnr,
summary_writer=writer) # eval_data value range is 0-255
for epoch in xrange(start_epoch, epoch):
for epoch in range(start_epoch, epoch):
np.random.shuffle(data)
for batch_id in xrange(start_step, numBatch):
for batch_id in range(start_step, numBatch):
batch_images = data[batch_id * batch_size:(batch_id + 1) * batch_size, :, :, :]
# batch_images = batch_images.astype(np.float32) / 255.0 # normalize the data to 0-1
_, loss, summary = self.sess.run([self.train_op, self.loss, merged],
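
One consequence of the paired data that this hunk does not address: np.random.shuffle(data) shuffles a single array, so clean and noisy patches would fall out of alignment unless both arrays are shuffled with the same permutation. A sketch of an alignment-preserving shuffle and batch slice (variable names assumed):

perm = np.random.permutation(len(clean))   # one shared permutation
clean, noisy = clean[perm], noisy[perm]    # pairs stay aligned
clean_batch = clean[batch_id * batch_size:(batch_id + 1) * batch_size]
noisy_batch = noisy[batch_id * batch_size:(batch_id + 1) * batch_size]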
@@ -142,7 +142,7 @@ class denoiser(object):
print(" [*] Load weights SUCCESS...")
psnr_sum = 0
print("[*] " + 'noise level: ' + str(self.sigma) + " start testing...")
for idx in xrange(len(test_files)):
for idx in range(len(test_files)):
clean_image = load_images(test_files[idx]).astype(np.float32) / 255.0
output_clean_image, noisy_image = self.sess.run([self.Y, self.X],
feed_dict={self.Y_: clean_image, self.is_training: False})
utils.py
import gc
print('4--')
import os
import sys
print('5--')
import numpy as np
print('6--')
import tensorflow #as tf
print('7--')
import tensorflow as tf
from PIL import Image
print('8--')
def data_augmentation(image, mode):
if mode == 0:
@@ -41,28 +36,35 @@ def data_augmentation(image, mode):
class train_data():
def __init__(self, filepath='./data/image_clean_pat.npy'):
def __init__(self, filepath='./data/image_clean_pats.npy', noisyfilepath='./data/image_noisy_pats.npy'):
self.filepath = filepath
assert '.npy' in filepath
if not os.path.exists(filepath):
print("[!] Data file not exists")
print("[!] Clean data file not exists")
sys.exit(1)
self.noisyfilepath = noisyfilepath
assert '.npy' in noisyfilepath
if not os.path.exists(noisyfilepath):
print("[!] Noisy data file not exists")
def __enter__(self):
print("[*] Loading data...")
self.data = np.load(self.filepath)
np.random.shuffle(self.data)
self.data = np.load(self.filepath).astype(np.float32) / 255.0 #normalize the data to 0-1
self.noisy = np.load(self.noisyfilepath).astype(np.float32) / 255.0 #normalize the data to 0-1
#np.random.shuffle(self.data)
print("[*] Load successfully...")
return self.data
return self.data, self.noisy
def __exit__(self, type, value, trace):
del self.data
del self.noisy
gc.collect()
print("In __exit__()")
def load_data(filepath='./data/image_clean_pat.npy'):
return train_data(filepath=filepath)
def load_data(filepath='./data/image_clean_pats.npy', noisyfilepath='./data/image_noisy_pats.npy'):
return train_data(filepath=filepath, noisyfilepath=noisyfilepath)
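
A usage sketch of the updated context manager; the shape assertion encodes the pairing invariant from the patch generation step:

with load_data(filepath='./data/img_clean_pats.npy',
               noisyfilepath='./data/img_noisy_pats.npy') as (clean, noisy):
    assert clean.shape == noisy.shape             # generated pairwise, same geometry
    print(clean.dtype, clean.min(), clean.max())  # float32, values in [0, 1]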
def load_images(filelist):