Marie Tahon / DnCNN-tensorflow-holography / Commits

Commit 8f519968
Authored Mar 26, 2021 by Touklakos
Commit message: "MAJ Sprint 5" (Sprint 5 update)
Parent: 0d7735ed
Changes: 10 files
argument.py
```diff
@@ -76,12 +76,14 @@ def parse():
     parser.add_argument('--exp_file', dest='exp_file', type=str, help='experiment file')
+    parser.add_argument('--nb_iteration', dest='nb_iteration', type=int, default=3, help='number of iterations for the de-noising operation')
+    parser.add_argument('--nb_rotation', dest='nb_rotation', type=int, default=8, help='number of rotations for data augmentation')
     parser.add_argument('--isDebug', dest='isDebug', action='store_true')
     parser.add_argument('--patch_size', dest='patch_size', default=50)
     parser.add_argument('--stride', dest='stride', default=50)
     parser.add_argument('--step', dest='step', default=0)
     parser.add_argument('--freq_save', dest='freq_save', type=int, default=1)
     parser.add_argument('--phase_type', dest='phase_type', default="two")
     parser.add_argument('--patch_per_image', dest='patch_per_image', default=384)
     parser.add_argument('--noise_src_dir', dest='noise_src_dir', default="./chemin/")
```
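For reference, a minimal, self-contained sketch of how the two options that the rest of this commit starts consuming (--nb_iteration, --nb_rotation) would parse; the parse() wrapper and the remaining options from argument.py are omitted here:

```python
import argparse

# Reproduces only the two options introduced in this hunk, to show their defaults.
parser = argparse.ArgumentParser()
parser.add_argument('--nb_iteration', dest='nb_iteration', type=int, default=3,
                    help='number of iterations for the de-noising operation')
parser.add_argument('--nb_rotation', dest='nb_rotation', type=int, default=8,
                    help='number of rotations for data augmentation')

args = parser.parse_args(['--nb_iteration', '5'])
print(args.nb_iteration, args.nb_rotation)  # -> 5 8
```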
data.py
```diff
@@ -57,7 +57,7 @@ class NoisyBSDSDataset(td.Dataset):
 class TrainDataset(NoisyBSDSDataset):
-    def __init__(self, clean, noisy, image_mode, image_size):
+    def __init__(self, clean, noisy, image_mode, image_size, nb_rotation=8):
         """ Initialize the data loader
         Arguments:
@@ -80,10 +80,8 @@ class TrainDataset(NoisyBSDSDataset):
         self.clean = normalize_data(self.clean, 'two', rdm, True)
         self.noisy = normalize_data(self.noisy, 'two', rdm, True)
-        rotation = 8
-        self.clean = rotate_data(self.clean, rotation)
-        self.noisy = rotate_data(self.noisy, rotation)
+        self.clean = rotate_data(self.clean, nb_rotation)
+        self.noisy = rotate_data(self.noisy, nb_rotation)
         print("data_size : ", self.clean.shape)
         print("data_type : ", type(self.clean))
```
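rotate_data is defined in utils, which is not part of this diff, so its exact behaviour is not shown here. A hypothetical sketch of a rotation-based augmentation taking an nb_rotation count, assuming (N, H, W, C) arrays and 90-degree steps (the project's actual implementation may differ):

```python
import numpy as np

def rotate_data(data, nb_rotation):
    """Hypothetical sketch: augment a (N, H, W, C) batch with rotated copies.

    Builds nb_rotation variants, rotating every patch by i * 90 degrees
    (wrapping around every 4 steps), and stacks them along the batch axis.
    """
    variants = [np.rot90(data, k=i % 4, axes=(1, 2)) for i in range(nb_rotation)]
    return np.concatenate(variants, axis=0)
```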
generate_patches_holo.py

(diff collapsed in the original view)
generate_patches_holo_fromMAT.py
```python
# -*- coding: utf-8 -*-
#
# This file is part of DnCnn4Holo.
#
# Adapted from https://github.com/wbhu/DnCNN-tensorflow by Hu Wenbo
#
# DnCnn4Holo is a python script for phase image denoising.
# Home page: https://git-lium.univ-lemans.fr/tahon/dncnn-tensorflow-holography
#
# DnCnn4Holo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# DnCnn4Holo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DnCnn4Holo. If not, see <http://www.gnu.org/licenses/>.

"""
Copyright 2019-2020 Marie Tahon

:mod:`generate_patches_holo_fromMat.py` generates patches from Matlab images
"""

import argparse
import argument
#import re
#import glob
import pathlib
import os, sys
from PIL import Image
import PIL
#import random
import numpy as np
from utils import *
#from hparams import hparams, hparams_debug_string
from scipy.io import loadmat

__license__ = "LGPL"
__author__ = "Marie Tahon"
__copyright__ = "Copyright 2019-2020 Marie Tahon"
__maintainer__ = "Marie Tahon"
__email__ = "marie.tahon@univ-lemans.fr"
__status__ = "Production"
#__docformat__ = 'reStructuredText'

# the pixel value range of the training data is '0-255' (uint8)

# macro
#DATA_AUG_TIMES = 1  # transform a sample to a different sample DATA_AUG_TIMES times

parser = argparse.ArgumentParser(description='')
#parser.add_argument('--clean_src_dir', dest='clean_src_dir', default='/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP', help='dir of clean data')
#parser.add_argument('--noise_src_dir', dest='noise_src_dir', default='/lium/raid01_c/tahon/holography/HOLODEEP', help='dir of noisy data')
#parser.add_argument('--train_image', dest='train_patterns', default=hparams.train_patterns, help='patterns of images for training')
#parser.add_argument('--train_noise', dest='train_noise', default=hparams.train_noise, help='noise values for training images')
parser.add_argument('--save_dir', dest='save_dir', default='./data1', help='dir of patches')
#parser.add_argument('--patch_size', dest='pat_size', type=int, default=hparams.patch_size, help='patch size')  # 50 for RGB and 70 for grayscale
#parser.add_argument('--stride', dest='stride', type=int, default=hparams.stride, help='stride')
#parser.add_argument('--step', dest='step', type=int, default=hparams.step, help='step')
parser.add_argument('--params', dest='params', type=str, default='', help='hyper parameters')
# check output arguments
#parser.add_argument('--from_file', dest='from_file', default="./data/img_clean_pats.npy", help='get pic from file')
#parser.add_argument('--num_pic', dest='num_pic', type=int, default=10, help='number of pic to pick')
args = parser.parse_args()
#print(args.params['patch_size'])
#hparams.parse(args.params)
#import ipdb
args = argument.parse()  # note: args is immediately overridden by the project-level parser


def generate_patches(isDebug=True):
    #global DATA_AUG_TIMES = 1
    #hparams.patch_size = args.pat_size
    #print(hparams_debug_string())
    #filepaths = [x for x in src_path.glob('*.tiff')]  #('*.mat')
    #noisyfilepaths = [x for x in noisy_path.glob('*.tiff')]  #('*.mat')
    cleanmat, noisymat = from_DATABASE(args.train_dir, args.train_noises, args.train_patterns)
    #ipdb.set_trace()
    print("number of clean training data {0} and noisy {1}".format(len(cleanmat), len(noisymat)))

    scales = 1  # and we do not change it!!!! hparams.scales (old version: [1, 0.9, 0.8, 0.7])
    if args.patch_size > args.originalsize[0]:
        sys.exit('patch size > original size of images')
    nb_image = len(cleanmat)
    nb_patch_per_image = int((args.originalsize[0] - args.patch_size) / args.stride + 1) * int((args.originalsize[1] - args.patch_size) / args.stride + 1)
    # (1024 - 50)/50 + 1 = 20 -> 20*20 = 400 patches per img
    nb_origin_patch = nb_patch_per_image * nb_image
    nb_final_patch = args.patch_per_image * nb_image
    print("total number of patches for all training images = ", nb_origin_patch, " and used patches = ", nb_final_patch)

    if nb_final_patch % args.batch_size != 0:
        #if origin_patch_num > hparams.batch_size:
        numPatches = int(nb_final_patch / args.batch_size + 1) * args.batch_size
    else:
        numPatches = nb_final_patch
    print("total patches = %d , batch size = %d, total batches = %d" % (numPatches, args.batch_size, numPatches / args.batch_size))

    # data matrix 4-D
    cleaninputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1))
    noisyinputs = np.zeros((numPatches, args.patch_size, args.patch_size, 1))
    print("Shape of input (including noisy) : ", cleaninputs.shape)
    #ipdb.set_trace()

    cpt_img_scale = 0
    # generate patches
    for i in range(nb_image):
        cleanimg = cleanmat[i]  ##import matlab image img = loadmat(filepaths[i]) ? TO CHECK
        #noisyimg = Image.open(noisyfilepaths[i]).convert('L')  # convert RGB to gray, no need to convert: grayscale
        noisyimg = noisymat[i]  ##import matlab image img = loadmat(filepaths[i]) ? TO CHECK
        #for s in range(len(scales)):
        #    newsize = (int(img.size[0] * scales[s]), int(img.size[1] * scales[s]))
        #    # print newsize
        #    img_s = img.resize(newsize, resample=PIL.Image.BICUBIC)
        #    img_s = np.reshape(np.array(img_s, dtype="uint8"), (img_s.size[0], img_s.size[1], 1))  # extend one dimension
        #    noisyimg_s = noisyimg.resize(newsize, resample=PIL.Image.BICUBIC)
        #    noisyimg_s = np.reshape(np.array(noisyimg_s, dtype="uint8"), (noisyimg_s.size[0], noisyimg_s.size[1], 1))  # extend one dimension
        #    for j in range(DATA_AUG_TIMES):
        #        im_h, im_w, _ = img_s.shape
        cpt = 0
        inputs_img_scale = np.zeros((nb_patch_per_image, args.patch_size, args.patch_size, 1))
        noisyinputs_img_scale = np.zeros((nb_patch_per_image, args.patch_size, args.patch_size, 1))
        for x in range(0 + args.step, args.originalsize[0] - args.patch_size, args.stride):
            for y in range(0 + args.step, args.originalsize[1] - args.patch_size, args.stride):
                #print(x,y)
                # depending on the mode: 0 normal, 1 flip up/down, 2 rotate 90, ..
                #inputs[count, :, :, :] = data_augmentation(img_s[x:x + args.pat_size, y:y + args.pat_size, :], np.random.randint(0, 7))
                # so here I just want the normal version of the image
                inputs_img_scale[cpt, :, :, :] = cleanimg[:, x:x + args.patch_size, y:y + args.patch_size, :]
                noisyinputs_img_scale[cpt, :, :, :] = noisyimg[:, x:x + args.patch_size, y:y + args.patch_size, :]
                cpt += 1
        # shuffle the patches of an image identically on noisy and clean images
        perm_idx = np.random.permutation(cpt)[:args.patch_per_image]
        #print("perm_idx", perm_idx.shape, perm_idx)
        cleaninputs[cpt_img_scale: cpt_img_scale + args.patch_per_image, :, :, :] = inputs_img_scale[perm_idx, :, :, :]
        noisyinputs[cpt_img_scale: cpt_img_scale + args.patch_per_image, :, :, :] = noisyinputs_img_scale[perm_idx, :, :, :]
        cpt_img_scale += args.patch_per_image
        #del img, noisyimg

    #if hparams.phase_type == 'phi':
    #    rdm = None
    #else:
    #    rdm = np.random.randint(0, 2, inputs.shape[0])
    #inputs_n = normalize_data(inputs, hparams.phase_type, rdm)
    #noisyinputs_n = normalize_data(noisyinputs, hparams.phase_type, rdm)
    #ipdb.set_trace()
    #print("Count total nb of patches = ", cpt_img_scale * hparams.patch_per_image)

    # pad the batch (the last batch is completed with the first inputs)
    if nb_final_patch < numPatches:
        to_pad = numPatches - nb_final_patch
        print('Nb of patches added for padding to batch size: ', to_pad)
        cleaninputs[-to_pad:, :, :, :] = cleaninputs[:to_pad, :, :, :]
        noisyinputs[-to_pad:, :, :, :] = noisyinputs[:to_pad, :, :, :]

    # check input images
    #import matplotlib.pyplot as plt
    #plt.imsave('test0_clean', inputs[0, :, :, 0], cmap='Greys')
    #plt.imsave('test0_noisy', noisyinputs[0, :, :, 0], cmap='Greys')
    print('shape of inputs: ', cleaninputs.shape)
    print('amplitude of inputs: ', np.max(cleaninputs), np.min(cleaninputs))

    sess_name = extract_sess_name(args.train_patterns, args.train_noises, args.phase_type, args.stride, args.patch_size, args.patch_per_image)
    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)
    np.save(os.path.join(args.save_dir, "img_clean_train_" + sess_name), cleaninputs)
    np.save(os.path.join(args.save_dir, "img_noisy_train_" + sess_name), noisyinputs)
    print("size of inputs tensor = " + str(cleaninputs.shape))


if __name__ == '__main__':
    generate_patches(args.isDebug)
```
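As a sanity check of the patch-count arithmetic in generate_patches, with the defaults from argument.py (patch_size=50, stride=50, patch_per_image=384) on the 1024x1024 images mentioned in the source comment; batch_size and nb_image are illustrative values, not the project defaults:

```python
# Patch-count arithmetic from generate_patches, with illustrative values.
H = W = 1024           # args.originalsize
patch_size = 50        # default --patch_size
stride = 50            # default --stride
patch_per_image = 384  # default --patch_per_image
batch_size = 100       # illustrative; the real value comes from args.batch_size
nb_image = 5           # illustrative

nb_patch_per_image = int((H - patch_size) / stride + 1) * int((W - patch_size) / stride + 1)
print(nb_patch_per_image)  # 20 * 20 = 400, as in the source comment

nb_final_patch = patch_per_image * nb_image  # 1920
# Round up to a whole number of batches, as the script does:
if nb_final_patch % batch_size != 0:
    numPatches = int(nb_final_patch / batch_size + 1) * batch_size
else:
    numPatches = nb_final_patch
print(numPatches)  # 2000; the last 80 slots are padded with the first patches
```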
main_holo.py
```diff
@@ -65,7 +65,7 @@ def evaluate_on_HOLODEEP(args, exp):
     patterns = args.test_patterns
     noises = args.test_noises
-    clean, noisy = from_DATABASE(args.eval_dir, noises, patterns, False)
+    clean, noisy = from_DATABASE(args.eval_dir, noises, patterns, True)
     clean = np.array(clean)
     noisy = np.array(noisy)
```
```diff
@@ -109,7 +109,7 @@ def evaluate_on_DATAEVAL(args, exp):
-def denoise_img(args, noisy, clean, name, exp, nb_iteration=3):
+def denoise_img(args, noisy, clean, name, exp):
     """This method is used to do and save a de-noising operation on a given image
     Arguments:
@@ -118,10 +118,10 @@ def denoise_img(args, noisy, clean, name, exp, nb_iteration=3):
         clean (numpy.array) : The clean reference
         name (str) : The name used to save the results
         exp (Experiment) : The model used to do the de-noising operation
-        nb_iteration (int, optional) : The number of iterations to de-noise the image
     """
     clean_pred_rad = noisy
+    nb_iteration = args.nb_iteration
     for j in range(nb_iteration):
         clean_pred_rad = denoising_single_image(args, clean_pred_rad, exp)
```
```diff
@@ -146,11 +146,12 @@ def denoising_single_image(args, noisy, exp):
     noisyPy_cos = torch.Tensor(normalize_data(noisyPy, 'cos', None))
     noisyPy_sin = torch.Tensor(normalize_data(noisyPy, 'sin', None))
-    clean_pred_cos = exp.test(noisyPy_cos)
-    clean_pred_sin = exp.test(noisyPy_sin)
-    clean_pred_rad = torch.angle(clean_pred_cos + clean_pred_sin * 1J)
-    clean_pred_rad = clean_pred_rad.detach().cpu().numpy().reshape(1, args.test_image_size[0], args.test_image_size[1], args.image_mode)
+    clean_pred_cos = exp.test(noisyPy_cos).detach().cpu().numpy()
+    clean_pred_sin = exp.test(noisyPy_sin).detach().cpu().numpy()
+    clean_pred_rad = np.angle(clean_pred_cos + clean_pred_sin * 1J)
+    clean_pred_rad = clean_pred_rad.reshape(1, args.test_image_size[0], args.test_image_size[1], args.image_mode)
     return clean_pred_rad
```
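Both variants of denoising_single_image rely on the same identity: a wrapped phase can be recovered from its denoised cosine and sine components via angle(cos + i*sin). A tiny numpy illustration of the identity itself, independent of any model:

```python
import numpy as np

phase = np.linspace(-np.pi, np.pi, 7)      # a wrapped phase signal in radians
cos_part, sin_part = np.cos(phase), np.sin(phase)
# In the script, cos_part and sin_part are each denoised by the network;
# here we recombine them directly to show the identity:
recovered = np.angle(cos_part + 1j * sin_part)
print(np.allclose(recovered, phase))       # True (up to the -pi/pi boundary)
```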
```diff
@@ -162,9 +163,7 @@ def run(args):
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
-    trainData = TrainDataset(args.clean_train, args.noisy_train, args.image_mode, args.train_image_size)
-    evalData = EvalDataset(args.eval_dir, args.eval_noises, args.eval_patterns, args.image_mode, args.eval_image_size)
     net = DnCNN(D=args.D, C=args.C, image_mode=args.image_mode).to(device)
     adam = torch.optim.Adam(net.parameters(), lr=args.lr)
```
```diff
@@ -172,12 +171,17 @@ def run(args):
-    exp = nt.Experiment(net, trainData, evalData, adam, statsManager, batch_size=args.batch_size, perform_validation_during_training=args.perform_validation, input_dir=args.input_dir, startEpoch=args.epoch)
+    exp = nt.Experiment(net, adam, statsManager, batch_size=args.batch_size, perform_validation_during_training=args.perform_validation, input_dir=args.input_dir, startEpoch=args.epoch, freq_save=args.freq_save)
     if not args.test_mode:
         print("\n=>Training until epoch :<===\n", args.num_epochs)
+        print("\n\nModel training")
+        trainData = TrainDataset(args.clean_train, args.noisy_train, args.image_mode, args.train_image_size, nb_rotation=args.nb_rotation)
+        evalData = EvalDataset(args.eval_dir, args.eval_noises, args.eval_patterns, args.image_mode, args.eval_image_size)
+        exp.initData(trainData, evalData)
         exp.run(num_epochs=args.num_epochs)
     if (args.graph):
```
```diff
@@ -197,6 +201,7 @@ def run(args):
 if __name__ == '__main__':
     args = parse()
+    print("\n\n")
```
model.py
```python
import torch
from utils import NNRegressor
import numpy as np


class DnCNN(NNRegressor):
    """ This class is an implementation of the DnCNN (Denoising Convolutional Neural Network)
    """

    def __init__(self, D=4, C=64, image_mode=1):
        """ Initialize the DnCNN
        Arguments:
            D (int, optional) : The number of layers
            C (int, optional) : The number of output channels of the convolutions
            image_mode (int, optional) : The number of input channels of the images
        """
        super(DnCNN, self).__init__()
        self.D = D
        self.C = C
        self.image_mode = image_mode
        self.conv = torch.nn.ModuleList()
        self.conv.append(torch.nn.Conv2d(self.image_mode, self.C, 3, padding=1))
        self.conv.extend([torch.nn.Conv2d(self.C, self.C, 3, padding=1) for _ in range(self.D)])
        self.conv.append(torch.nn.Conv2d(self.C, self.image_mode, 3, padding=1))
        for i in range(len(self.conv[:-1])):
            torch.nn.init.kaiming_normal_(self.conv[i].weight.data, nonlinearity='relu')
        self.bn = torch.nn.ModuleList()
        # NB: the second positional argument of BatchNorm2d is eps, not a channel count
        self.bn.extend([torch.nn.BatchNorm2d(self.C, self.C) for _ in range(self.D)])
        for i in range(D):
            torch.nn.init.constant_(self.bn[i].weight.data, 1.25 * np.sqrt(self.C))

    def forward(self, input):
        """ Take an input and pass it through the network
        Arguments:
            input (tensor) : The tensor that will feed the network
        Return:
            The tensor after its pass through the network: the input minus the
            predicted noise, i.e. the denoised image
        """
        h = torch.nn.functional.relu(self.conv[0](input))
        for i in range(self.D):
            h = torch.nn.functional.relu(self.bn[i](self.conv[i + 1](h)))
        y = self.conv[self.D + 1](h)
        z = input - y
        return z
```
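A quick smoke test of the network defined above, assuming utils.NNRegressor behaves like a regular torch.nn.Module subclass; the batch and patch sizes are arbitrary:

```python
import torch

# Instantiate with the defaults (D=4 hidden conv layers, C=64 channels, 1-channel images)
# and check that a dummy batch passes through with its shape preserved.
net = DnCNN(D=4, C=64, image_mode=1)
dummy = torch.randn(2, 1, 50, 50)  # (batch, channels, H, W), e.g. 50x50 patches
out = net(dummy)
print(out.shape)                    # torch.Size([2, 1, 50, 50])
```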