Commit 8f519968 authored by Touklakos's avatar Touklakos
Browse files

MAJ Sprint 5

parent 0d7735ed
......@@ -76,12 +76,14 @@ def parse():
parser.add_argument('--exp_file', dest='exp_file', type=str, help='experiment file')
parser.add_argument('--nb_iteration', dest='nb_iteration', type=int, default=3, help='number of iteration for de-noising operation')
parser.add_argument('--nb_rotation', dest='nb_rotation', type=int, default=8, help='number of rotations for data augmentation')
parser.add_argument('--isDebug', dest='isDebug', action='store_true')
parser.add_argument('--patch_size', dest='patch_size', default=50)
parser.add_argument('--stride', dest='stride', default=50)
parser.add_argument('--step', dest='step', default=0)
parser.add_argument('--freq_save', dest='freq_save', type=int, default=1)
parser.add_argument('--phase_type', dest='phase_type', default="two")
parser.add_argument('--patch_per_image', dest='patch_per_image', default=384)
parser.add_argument('--noise_src_dir', dest='noise_src_dir', default="./chemin/")
......
......@@ -57,7 +57,7 @@ class NoisyBSDSDataset(td.Dataset):
class TrainDataset(NoisyBSDSDataset):
def __init__(self, clean, noisy, image_mode, image_size):
def __init__(self, clean, noisy, image_mode, image_size, nb_rotation=8):
""" Initialize the data loader
Arguments:
......@@ -80,10 +80,8 @@ class TrainDataset(NoisyBSDSDataset):
self.clean = normalize_data(self.clean, 'two', rdm, True)
self.noisy = normalize_data(self.noisy, 'two', rdm, True)
rotation = 8
self.clean = rotate_data(self.clean, rotation)
self.noisy = rotate_data(self.noisy, rotation)
self.clean = rotate_data(self.clean, nb_rotation)
self.noisy = rotate_data(self.noisy, nb_rotation)
print("data_size : ", self.clean.shape)
print("data_type : ", type(self.clean))
......
# -*- coding: utf-8 -*-
#
# This file is part of DnCnn4Holo.
#
# DnCnn4Holo is a python script for phase image denoising.
# Home page: https://git-lium.univ-lemans.fr/tahon/dncnn-tensorflow-holography
#
# Adapted from https://github.com/wbhu/DnCNN-tensorflow by Hu Wenbo
#
# DnCnn4Holo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# DnCnn4Holo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DnCnn4Holo. If not, see <http://www.gnu.org/licenses/>.
"""
Copyright 2019-2020 Marie Tahon
:mod:`generate_patches_holo.py` generate patches from .tiff images
"""
import argparse
import argument
#import re
#import glob
import pathlib
import os, sys
from PIL import Image
import PIL
#import random
import numpy as np
from utils import *
#from hparams import hparams, hparams_debug_string
from scipy.io import loadmat
# Package metadata.
__license__ = "LGPL"
__author__ = "Marie Tahon"
__copyright__ = "Copyright 2019-2020 Marie Tahon"
__maintainer__ = "Marie Tahon"
__email__ = "marie.tahon@univ-lemans.fr"
__status__ = "Production"
#__docformat__ = 'reStructuredText'
# the pixel value range is '0-255'(uint8 ) of training data
# macro
DATA_AUG_TIMES = 1 # transform a sample to a different sample for DATA_AUG_TIMES times
# NOTE(review): this local parser is vestigial — parse_args() is never called
# in this script (see `args = argument.parse()` below), so the --save_dir and
# --params flags declared here are effectively ignored; confirm before removal.
parser = argparse.ArgumentParser(description='')
#parser.add_argument('--clean_src_dir', dest='clean_src_dir', default='/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP', help='dir of clean data')
#parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
#parser.add_argument('--train_image', dest='train_patterns', default=hparams.train_patterns, help='patterns of images for training')
#parser.add_argument('--train_noise', dest='train_noise', default=hparams.train_noise, help='noise values for training images')
parser.add_argument('--save_dir', dest='save_dir', default='./data1', help='dir of patches')
#parser.add_argument('--patch_size', dest='pat_size', type=int, default=hparams.patch_size, help='patch size')#50 for RGB and 70 for grayscale
#parser.add_argument('--stride', dest='stride', type=int, default=hparams.stride, help='stride')
#parser.add_argument('--step', dest='step', type=int, default=hparams.step, help='step')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
# check output arguments
#parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
#parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
#args = parser.parse_args()
#print(args.params['patch_size'])
#hparams.parse(args.params)
# All options actually read below (patch_size, stride, step, scales,
# batch_size, patch_per_image, save_dir, ...) come from the shared
# `argument` module.
args = argument.parse()
#def from_DATABASE():
def generate_patches(isDebug=True):
    """Cut paired clean/noisy images into training patches and save them as .npy.

    Reads paired clean and noisy image paths via ``from_NATURAL``, rescales
    every image by each factor in ``args.scales``, extracts
    ``args.patch_size`` x ``args.patch_size`` patches with stride
    ``args.stride`` (starting at offset ``args.step``), keeps
    ``args.patch_per_image`` randomly chosen patches per (image, scale) pair
    (same random selection on the clean and noisy sides so pairs stay
    aligned), pads the result up to a whole number of ``args.batch_size``
    batches, and saves both uint8 4-D tensors under ``args.save_dir``.

    :param isDebug: when True, only the first 10 image pairs are processed.
    """
    filepaths, noisyfilepaths = from_NATURAL(args.noise_src_dir, args.clean_src_dir, path_only=True)
    if isDebug:
        filepaths = filepaths[:10]
        noisyfilepaths = noisyfilepaths[:10]
    print("number of clean training data {0} and noisy {1}".format(len(filepaths), len(noisyfilepaths)))
    scales = args.scales  # old version used [1, 0.9, 0.8, 0.7]

    # Count how many patches each scaled image would yield; we assume all
    # source images share args.originalsize, so only the dimensions are needed
    # (no need to actually rebuild the resized images here).
    count1 = 0
    d = dict()  # histogram: scaled size -> number of images at that size
    for i in range(len(filepaths)):
        for s in range(len(scales)):
            newsize = (int(args.originalsize[0] * scales[s]), int(args.originalsize[1] * scales[s]))
            d[newsize] = d.get(newsize, 0) + 1
            im_h, im_w = newsize
            count1 += int((im_h - args.patch_size) / args.stride) * int((im_w - args.patch_size) / args.stride)
    # Number of patches actually kept (a random subset per image/scale).
    count = len(scales) * len(filepaths) * args.patch_per_image
    print("total number of patches for all taining images = ", count1, " and used patches = ", count)
    origin_patch_num = count * DATA_AUG_TIMES
    for size in d:
        print("%i images in size %s" % (d[size], size))

    # Round the tensor size up to a whole number of batches.
    if origin_patch_num % args.batch_size != 0:
        numPatches = int((origin_patch_num / args.batch_size + 1) * args.batch_size)
    else:
        numPatches = origin_patch_num
    print("total patches = %d , batch size = %d, total batches = %d" % (numPatches, args.batch_size, numPatches / args.batch_size))

    # 4-D data tensors: (patch index, height, width, channel).
    inputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1), dtype="uint8")
    noisyinputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1), dtype="uint8")
    print("Shape of input (including noisy) : ", inputs.shape)

    cpt_img_scale = 0  # number of (image, scale) pairs processed so far
    # Generate patches.
    for i in range(len(filepaths)):
        img = Image.open(filepaths[i]).convert('L')  # grayscale clean image
        noisyimg = Image.open(noisyfilepaths[i]).convert('L')  # matching noisy image
        for s in range(len(scales)):
            newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
            img_s = img.resize(newsize, resample=PIL.Image.BICUBIC)
            # NOTE(review): PIL .size is (width, height) while np.array(image)
            # has shape (height, width); this reshape only preserves the pixel
            # layout when the image is square — TODO confirm inputs are square.
            img_s = np.reshape(np.array(img_s, dtype="uint8"), (img_s.size[0], img_s.size[1], 1))
            noisyimg_s = noisyimg.resize(newsize, resample=PIL.Image.BICUBIC)
            noisyimg_s = np.reshape(np.array(noisyimg_s, dtype="uint8"), (noisyimg_s.size[0], noisyimg_s.size[1], 1))
            for j in range(DATA_AUG_TIMES):
                im_h, im_w, _ = img_s.shape
                cpt = 0
                indPatch_x = range(0 + args.step, im_h - args.patch_size, args.stride)
                indPatch_y = range(0 + args.step, im_w - args.patch_size, args.stride)
                numPatch_per_img = len(indPatch_x) * len(indPatch_y)
                inputs_img_scale = np.zeros((numPatch_per_img, args.patch_size, args.patch_size, 1), dtype='uint8')
                noisyinputs_img_scale = np.zeros((numPatch_per_img, args.patch_size, args.patch_size, 1), dtype='uint8')
                for x in indPatch_x:
                    for y in indPatch_y:
                        # Plain (non-augmented) patch; clean and noisy patches
                        # are cut at identical coordinates so they stay paired.
                        inputs_img_scale[cpt, :, :, :] = img_s[x:x + args.patch_size, y:y + args.patch_size, :]
                        noisyinputs_img_scale[cpt, :, :, :] = noisyimg_s[x:x + args.patch_size, y:y + args.patch_size, :]
                        cpt += 1
                # Shuffle the patches of this image identically on the clean
                # and noisy sides, then keep args.patch_per_image of them.
                perm_idx = np.random.permutation(cpt)[:args.patch_per_image]
                # Bug fix: each (image, scale) pair owns a patch_per_image-wide
                # slot in the output tensors, so the write offset must advance
                # by args.patch_per_image. The previous code advanced it by 1,
                # overwriting all but one row of every earlier pair's patches.
                offset = cpt_img_scale * args.patch_per_image
                inputs[offset: offset + args.patch_per_image, :, :, :] = inputs_img_scale[perm_idx, :, :, :]
                noisyinputs[offset: offset + args.patch_per_image, :, :, :] = noisyinputs_img_scale[perm_idx, :, :, :]
                cpt_img_scale += 1
        del img, noisyimg

    # Pad the last batch: complete the tensors with the first patches so that
    # numPatches rows are filled.
    if count < numPatches:
        to_pad = numPatches - count
        print('Nb of patches added for padding to batch size: ', to_pad)
        inputs[-to_pad:, :, :, :] = inputs[:to_pad, :, :, :]
        noisyinputs[-to_pad:, :, :, :] = noisyinputs[:to_pad, :, :, :]

    print('shape of inputs: ', inputs.shape)
    print('amplitude of inputs: ', np.max(inputs), np.min(inputs))
    sess_name = extract_sess_name(args.train_patterns, args.train_noise, args.phase_type, args.stride, args.patch_size, args.patch_per_image)
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    np.save(os.path.join(args.save_dir, "img_clean_train_" + sess_name), inputs)
    np.save(os.path.join(args.save_dir, "img_noisy_train_" + sess_name), noisyinputs)
    print("size of inputs tensor = " + str(inputs.shape))


if __name__ == '__main__':
    generate_patches(args.isDebug)
# -*- coding: utf-8 -*-
#
# This file is part of DnCnn4Holo.
#
# DnCnn4Holo is a python script for phase image denoising.
# Home page: https://git-lium.univ-lemans.fr/tahon/dncnn-tensorflow-holography
#
# Adapted from https://github.com/wbhu/DnCNN-tensorflow by Hu Wenbo
#
# DnCnn4Holo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# DnCnn4Holo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DnCnn4Holo. If not, see <http://www.gnu.org/licenses/>.
"""
Copyright 2019-2020 Marie Tahon
:mod:`generate_patches_holo.py` generate patches from .tiff images
"""
import argparse
import argument
#import re
#import glob
import pathlib
import os, sys
from PIL import Image
import PIL
#import random
import numpy as np
from utils import *
#from hparams import hparams, hparams_debug_string
from scipy.io import loadmat
# Package metadata.
__license__ = "LGPL"
__author__ = "Marie Tahon"
__copyright__ = "Copyright 2019-2020 Marie Tahon"
__maintainer__ = "Marie Tahon"
__email__ = "marie.tahon@univ-lemans.fr"
__status__ = "Production"
#__docformat__ = 'reStructuredText'
# the pixel value range is '0-255'(uint8 ) of training data
# macro
DATA_AUG_TIMES = 1 # transform a sample to a different sample for DATA_AUG_TIMES times
# NOTE(review): this local parser is vestigial — parse_args() is never called
# in this script (see `args = argument.parse()` below), so the --save_dir and
# --params flags declared here are effectively ignored; confirm before removal.
parser = argparse.ArgumentParser(description='')
#parser.add_argument('--clean_src_dir', dest='clean_src_dir', default='/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP', help='dir of clean data')
#parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
#parser.add_argument('--train_image', dest='train_patterns', default=hparams.train_patterns, help='patterns of images for training')
#parser.add_argument('--train_noise', dest='train_noise', default=hparams.train_noise, help='noise values for training images')
parser.add_argument('--save_dir', dest='save_dir', default='./data1', help='dir of patches')
#parser.add_argument('--patch_size', dest='pat_size', type=int, default=hparams.patch_size, help='patch size')#50 for RGB and 70 for grayscale
#parser.add_argument('--stride', dest='stride', type=int, default=hparams.stride, help='stride')
#parser.add_argument('--step', dest='step', type=int, default=hparams.step, help='step')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
# check output arguments
#parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
#parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
#args = parser.parse_args()
#print(args.params['patch_size'])
#hparams.parse(args.params)
# All options actually read below (patch_size, stride, step, scales,
# batch_size, patch_per_image, save_dir, ...) come from the shared
# `argument` module.
args = argument.parse()
#def from_DATABASE():
def generate_patches(isDebug=True):
    """Cut paired clean/noisy images into training patches and save them as .npy.

    Reads paired clean and noisy image paths via ``from_NATURAL``, rescales
    every image by each factor in ``args.scales``, extracts
    ``args.patch_size`` x ``args.patch_size`` patches with stride
    ``args.stride`` (starting at offset ``args.step``), keeps
    ``args.patch_per_image`` randomly chosen patches per (image, scale) pair
    (same random selection on the clean and noisy sides so pairs stay
    aligned), pads the result up to a whole number of ``args.batch_size``
    batches, and saves both uint8 4-D tensors under ``args.save_dir``.

    :param isDebug: when True, only the first 10 image pairs are processed.
    """
    filepaths, noisyfilepaths = from_NATURAL(args.noise_src_dir, args.clean_src_dir, path_only=True)
    if isDebug:
        filepaths = filepaths[:10]
        noisyfilepaths = noisyfilepaths[:10]
    print("number of clean training data {0} and noisy {1}".format(len(filepaths), len(noisyfilepaths)))
    scales = args.scales  # old version used [1, 0.9, 0.8, 0.7]

    # Count how many patches each scaled image would yield; we assume all
    # source images share args.originalsize, so only the dimensions are needed
    # (no need to actually rebuild the resized images here).
    count1 = 0
    d = dict()  # histogram: scaled size -> number of images at that size
    for i in range(len(filepaths)):
        for s in range(len(scales)):
            newsize = (int(args.originalsize[0] * scales[s]), int(args.originalsize[1] * scales[s]))
            d[newsize] = d.get(newsize, 0) + 1
            im_h, im_w = newsize
            count1 += int((im_h - args.patch_size) / args.stride) * int((im_w - args.patch_size) / args.stride)
    # Number of patches actually kept (a random subset per image/scale).
    count = len(scales) * len(filepaths) * args.patch_per_image
    print("total number of patches for all taining images = ", count1, " and used patches = ", count)
    origin_patch_num = count * DATA_AUG_TIMES
    for size in d:
        print("%i images in size %s" % (d[size], size))

    # Round the tensor size up to a whole number of batches.
    if origin_patch_num % args.batch_size != 0:
        numPatches = int((origin_patch_num / args.batch_size + 1) * args.batch_size)
    else:
        numPatches = origin_patch_num
    print("total patches = %d , batch size = %d, total batches = %d" % (numPatches, args.batch_size, numPatches / args.batch_size))

    # 4-D data tensors: (patch index, height, width, channel).
    inputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1), dtype="uint8")
    noisyinputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1), dtype="uint8")
    print("Shape of input (including noisy) : ", inputs.shape)

    cpt_img_scale = 0  # number of (image, scale) pairs processed so far
    # Generate patches.
    for i in range(len(filepaths)):
        img = Image.open(filepaths[i]).convert('L')  # grayscale clean image
        noisyimg = Image.open(noisyfilepaths[i]).convert('L')  # matching noisy image
        for s in range(len(scales)):
            newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
            img_s = img.resize(newsize, resample=PIL.Image.BICUBIC)
            # NOTE(review): PIL .size is (width, height) while np.array(image)
            # has shape (height, width); this reshape only preserves the pixel
            # layout when the image is square — TODO confirm inputs are square.
            img_s = np.reshape(np.array(img_s, dtype="uint8"), (img_s.size[0], img_s.size[1], 1))
            noisyimg_s = noisyimg.resize(newsize, resample=PIL.Image.BICUBIC)
            noisyimg_s = np.reshape(np.array(noisyimg_s, dtype="uint8"), (noisyimg_s.size[0], noisyimg_s.size[1], 1))
            for j in range(DATA_AUG_TIMES):
                im_h, im_w, _ = img_s.shape
                cpt = 0
                indPatch_x = range(0 + args.step, im_h - args.patch_size, args.stride)
                indPatch_y = range(0 + args.step, im_w - args.patch_size, args.stride)
                numPatch_per_img = len(indPatch_x) * len(indPatch_y)
                inputs_img_scale = np.zeros((numPatch_per_img, args.patch_size, args.patch_size, 1), dtype='uint8')
                noisyinputs_img_scale = np.zeros((numPatch_per_img, args.patch_size, args.patch_size, 1), dtype='uint8')
                for x in indPatch_x:
                    for y in indPatch_y:
                        # Plain (non-augmented) patch; clean and noisy patches
                        # are cut at identical coordinates so they stay paired.
                        inputs_img_scale[cpt, :, :, :] = img_s[x:x + args.patch_size, y:y + args.patch_size, :]
                        noisyinputs_img_scale[cpt, :, :, :] = noisyimg_s[x:x + args.patch_size, y:y + args.patch_size, :]
                        cpt += 1
                # Shuffle the patches of this image identically on the clean
                # and noisy sides, then keep args.patch_per_image of them.
                perm_idx = np.random.permutation(cpt)[:args.patch_per_image]
                # Bug fix: each (image, scale) pair owns a patch_per_image-wide
                # slot in the output tensors, so the write offset must advance
                # by args.patch_per_image. The previous code advanced it by 1,
                # overwriting all but one row of every earlier pair's patches.
                offset = cpt_img_scale * args.patch_per_image
                inputs[offset: offset + args.patch_per_image, :, :, :] = inputs_img_scale[perm_idx, :, :, :]
                noisyinputs[offset: offset + args.patch_per_image, :, :, :] = noisyinputs_img_scale[perm_idx, :, :, :]
                cpt_img_scale += 1
        del img, noisyimg

    # Pad the last batch: complete the tensors with the first patches so that
    # numPatches rows are filled.
    if count < numPatches:
        to_pad = numPatches - count
        print('Nb of patches added for padding to batch size: ', to_pad)
        inputs[-to_pad:, :, :, :] = inputs[:to_pad, :, :, :]
        noisyinputs[-to_pad:, :, :, :] = noisyinputs[:to_pad, :, :, :]

    print('shape of inputs: ', inputs.shape)
    print('amplitude of inputs: ', np.max(inputs), np.min(inputs))
    sess_name = extract_sess_name(args.train_patterns, args.train_noise, args.phase_type, args.stride, args.patch_size, args.patch_per_image)
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    np.save(os.path.join(args.save_dir, "img_clean_train_" + sess_name), inputs)
    np.save(os.path.join(args.save_dir, "img_noisy_train_" + sess_name), noisyinputs)
    print("size of inputs tensor = " + str(inputs.shape))


if __name__ == '__main__':
    generate_patches(args.isDebug)
# -*- coding: utf-8 -*-
#
# This file is part of DnCnn4Holo.
#
# Adapted from https://github.com/wbhu/DnCNN-tensorflow by Hu Wenbo
#
# DnCnn4Holo is a python script for phase image denoising.
# Home page: https://git-lium.univ-lemans.fr/tahon/dncnn-tensorflow-holography
#
# DnCnn4Holo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# DnCnn4Holo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DnCnn4Holo. If not, see <http://www.gnu.org/licenses/>.
"""
Copyright 2019-2020 Marie Tahon
:mod:`generate_patches_holo_fromMat.py` generate patches from Matlab images
"""
import argparse
import argument
#import re
#import glob
import pathlib
import os, sys
from PIL import Image
import PIL
#import random
import numpy as np
from utils import *
#from hparams import hparams, hparams_debug_string
from scipy.io import loadmat
# Package metadata.
__license__ = "LGPL"
__author__ = "Marie Tahon"
__copyright__ = "Copyright 2019-2020 Marie Tahon"
__maintainer__ = "Marie Tahon"
__email__ = "marie.tahon@univ-lemans.fr"
__status__ = "Production"
#__docformat__ = 'reStructuredText'
# the pixel value range is '0-255'(uint8 ) of training data
# macro
#DATA_AUG_TIMES = 1 # transform a sample to a different sample for DATA_AUG_TIMES times
# Local parser kept for --save_dir / --params; note that its result is
# overridden below by the shared `argument` module.
parser = argparse.ArgumentParser(description='')
#parser.add_argument('--clean_src_dir', dest='clean_src_dir', default='/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP', help='dir of clean data')
#parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
#parser.add_argument('--train_image', dest='train_patterns', default=hparams.train_patterns, help='patterns of images for training')
#parser.add_argument('--train_noise', dest='train_noise', default=hparams.train_noise, help='noise values for training images')
parser.add_argument('--save_dir', dest='save_dir', default='./data1', help='dir of patches')
#parser.add_argument('--patch_size', dest='pat_size', type=int, default=hparams.patch_size, help='patch size')#50 for RGB and 70 for grayscale
#parser.add_argument('--stride', dest='stride', type=int, default=hparams.stride, help='stride')
#parser.add_argument('--step', dest='step', type=int, default=hparams.step, help='step')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
# check output arguments
#parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
#parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
args = parser.parse_args()
#print(args.params['patch_size'])
#hparams.parse(args.params)
#import ipdb
# NOTE(review): `args` from parse_args() above is immediately overwritten
# here; also, any CLI flag unknown to the local parser makes parse_args()
# exit before this line runs — confirm whether the parse_args() call should
# be removed like in the sibling scripts.
args=argument.parse()
def generate_patches(isDebug=True):
#global DATA_AUG_TIMES = 1
#hparams.patch_size = args.pat_size
# print(hparams_debug_string())
#filepaths = [x for x in src_path.glob('*.tiff')] #('*.mat')
#noisyfilepaths = [x for x in noisy_path.glob('*.tiff')] #('*.mat')
cleanmat, noisymat = from_DATABASE(args.train_dir, args.train_noises, args.train_patterns)
#ipdb.set_trace()
print("number of clean training data {0} and noisy {1}".format( len(cleanmat), len(noisymat)))
scales = 1 #et on ne le bouge pas !!!! hparams.scales #old version [1, 0.9, 0.8, 0.7]
if args.patch_size > args.originalsize[0]:
sys.exit('patch size > size of original size of images')
nb_image = len(cleanmat)
nb_patch_per_image = int((args.originalsize[0] - args.patch_size)/args.stride + 1) * int((args.originalsize[1] - args.patch_size)/args.stride + 1) #(1024 - 50)/50 + 1 = 20 -> 20*20 = 400 patch per img
nb_origin_patch = nb_patch_per_image * nb_image
nb_final_patch = args.patch_per_image * nb_image
print("total number of patches for all taining images = ", nb_origin_patch, " and used patches = ", nb_final_patch)
if nb_final_patch % args.batch_size != 0:
#if origin_patch_num > hparams.batch_size:
numPatches = int(nb_final_patch / args.batch_size + 1) * args.batch_size
else:
numPatches = nb_final_patch
print ("total patches = %d , batch size = %d, total batches = %d" % (numPatches, args.batch_size, numPatches / args.batch_size))
# data matrix 4-D
cleaninputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1))
noisyinputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1))
print("Shape of input (including noisy) : ", cleaninputs.shape)
#ipdb.set_trace()
cpt_img_scale = 0
# generate patches
for i in range(nb_image):
cleanimg = cleanmat[i] ##import matlab image img = loadmat(filepaths[i]) ? TO CHECK
#noisyimg = Image.open(noisyfilepaths[i]).convert('L') # convert RGB to gray, no need to convert: grayscale
noisyimg = noisymat[i] ##import matlab image img = loadmat(filepaths[i]) ? TO CHECK
#for s in range(len(scales)):
# newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
# # print newsize