Marie Tahon / DnCNN-tensorflow-holography / Commits

Commit a3e852e4, authored Nov 27, 2019 by Marie Tahon

new parameter nb_layer + modification for sess_name
parent 5b5baf52
Changes 8
README.md (view file @ a3e852e4)
...
@@ -29,6 +29,7 @@ Modifiable parameters are located in hparams.py
 #select images for testing
 test_patterns = [5], #image pattern used for test
 test_noise = [0, 1, 1.5], #noise level values for test images
+nb_layers = 16, #number of layers of the DnCNN architecture, default is 16 (16*3 in reality)
 noise_type = 'spkl', #type of noise: speckle or gaussian (spkl|gauss), gaussian noise can be used for this project
 sigma = 25, #noise level for gaussian denoising, not used in this project
...
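Since the parameters live in a tf.contrib.training.HParams object and the scripts call hparams.parse(args.params), the new nb_layers value can be overridden per run with a standard HParams override string instead of editing hparams.py. A minimal sketch (TF 1.x; values illustrative, and the --params flag spelling is inferred from args.params):

import tensorflow as tf  # TF 1.x, where tf.contrib.training.HParams exists

hparams = tf.contrib.training.HParams(nb_layers=16, noise_type='spkl')
hparams.parse('nb_layers=20')   # same mechanism as passing --params "nb_layers=20"
print(hparams.nb_layers)        # -> 20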
VibMap2mat.py (new file, mode 0 → 100755, view file @ a3e852e4)

import numpy as np
from scipy.io import loadmat, savemat
from PIL import Image

vibmap = loadmat('/lium/raid01_c/tahon/holography/DATAEVAL/VibPhaseDATA.mat')
mask = loadmat('/lium/raid01_c/tahon/holography/DATAEVAL/Mask1024_Rect.mat')
new_vibmap = {}
for key in ['Phase', 'PhiCal', 'Phaseb', 'BRUIT']:
    new_vibmap[key] = np.array(vibmap[key]) * np.array(mask['Mask'])
savemat('/lium/raid01_c/tahon/holography/DATAEVAL/VibPhaseDATA_masked.mat', new_vibmap, appendmat=False)

#check if savemat is correct
#nv = loadmat('/lium/raid01_c/tahon/holography/DATAEVAL/VibPhaseDATA_masked.mat')
#print(nv.keys())
#print(np.array(nv['Phase']).shape)
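The new script masks each stored phase field by elementwise multiplication with a binary rectangle mask. A minimal self-contained sketch of the same operation on synthetic data (sizes and values are illustrative, not from the dataset):

import numpy as np

phase = np.arange(16, dtype=float).reshape(4, 4)   # toy 4x4 "phase" map
mask = np.zeros((4, 4))
mask[1:3, 1:3] = 1.0                               # binary rectangular mask
masked = phase * mask                              # pixels outside the rectangle are zeroed
print(masked[1, 1], masked[0, 0])                  # -> 5.0 0.0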
generate_patches_holo.py (view file @ a3e852e4)
...
@@ -80,9 +80,8 @@ def generate_patches(isDebug=True):
     #global DATA_AUG_TIMES = 1
     #hparams.patch_size = args.pat_size
     print(hparams_debug_string())
     #filepaths = [x for x in src_path.glob('*.tiff')] #('*.mat')
     #noisyfilepaths = [x for x in noisy_path.glob('*.tiff')] #('*.mat')
     filepaths, noisyfilepaths = from_HOLODEEP(hparams.noise_src_dir, hparams.clean_src_dir, hparams.train_noise, hparams.train_patterns, path_only=True)
     #filepaths, noisyfilepaths = from_NATURAL(hparams.noise_src_dir, hparams.clean_src_dir, path_only=True)
     if isDebug:
         filepaths = filepaths[:10]
         noisyfilepaths = noisyfilepaths[:10]
...
@@ -142,6 +141,7 @@ def generate_patches(isDebug=True):
     #print(filepaths[i])
     img = Image.open(filepaths[i]).convert('L')
     #import matlab image: img = loadmat(filepaths[i])? TO CHECK
     noisyimg = Image.open(noisyfilepaths[i]).convert('L')
     # convert RGB to gray; no need to convert here: images are already grayscale
     #print(img.size)
     #img = filepaths[i]
     #noisyimg = noisyfilepaths[i]
     for s in range(len(scales)):
...
@@ -151,15 +151,19 @@ def generate_patches(isDebug=True):
     img_s = np.reshape(np.array(img_s, dtype="uint8"), (img_s.size[0], img_s.size[1], 1))  # extend one dimension
     noisyimg_s = noisyimg.resize(newsize, resample=PIL.Image.BICUBIC)
     noisyimg_s = np.reshape(np.array(noisyimg_s, dtype="uint8"), (noisyimg_s.size[0], noisyimg_s.size[1], 1))  # extend one dimension
     #print(img_s.shape)
     for j in range(DATA_AUG_TIMES):
         im_h, im_w, _ = img_s.shape
         cpt = 0
-        numPatch_per_img = int((im_h - hparams.patch_size) / hparams.stride) * int((im_w - hparams.patch_size) / hparams.stride)
+        indPatch_x = range(0 + hparams.step, im_h - hparams.patch_size, hparams.stride)
+        indPatch_y = range(0 + hparams.step, im_w - hparams.patch_size, hparams.stride)
+        #numPatch_per_img = int((im_h-hparams.patch_size)/hparams.stride) * int((im_w-hparams.patch_size)/hparams.stride)
+        numPatch_per_img = len(indPatch_x) * len(indPatch_y)
         inputs_img_scale = np.zeros((numPatch_per_img, hparams.patch_size, hparams.patch_size, 1), dtype='uint8')
         noisyinputs_img_scale = np.zeros((numPatch_per_img, hparams.patch_size, hparams.patch_size, 1), dtype='uint8')
-        for x in range(0 + hparams.step, im_h - hparams.patch_size, hparams.stride):
-            for y in range(0 + hparams.step, im_w - hparams.patch_size, hparams.stride):
         #print(inputs_img_scale.shape)
+        for x in indPatch_x:
+            for y in indPatch_y:
                 #depending on the mode: 0 normal, 1 flip up/down, 2 rotate 90, ...
                 #inputs[count, :, :, :] = data_augmentation(img_s[x:x + args.pat_size, y:y + args.pat_size, :], np.random.randint(0, 7))
                 #so here we only keep the normal (non-augmented) version of the image
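The new len(indPatch_x) * len(indPatch_y) count makes the preallocated patch arrays agree exactly with the number of iterations the loops perform, whereas the old integer-division formula could disagree with len(range(...)) by one. A quick check with illustrative values (patch_size=50, stride=50, step=0 are assumptions, not read from hparams.py):

im_h, patch_size, stride, step = 128, 50, 50, 0            # illustrative sizes
old_count = int((im_h - patch_size) / stride)               # -> 1
new_count = len(range(step, im_h - patch_size, stride))     # patches start at x = 0 and x = 50 -> 2
print(old_count, new_count)                                 # the old formula under-counts by one here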
...
@@ -197,7 +201,7 @@ def generate_patches(isDebug=True):
     print('shape of inputs: ', inputs.shape)
     print('amplitude of inputs: ', np.max(inputs), np.min(inputs))
-    sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type)
+    sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type, hparams.stride, hparams.patch_size, hparams.patch_per_image)
     if not os.path.exists(args.save_dir):
         os.mkdir(args.save_dir)
     np.save(os.path.join(args.save_dir, "img_clean_train_" + sess_name), inputs)
...
generate_patches_holo_fromMAT.py (view file @ a3e852e4)
...
@@ -70,7 +70,7 @@ args = parser.parse_args()
 #print(args.params['patch_size'])
 hparams.parse(args.params)
-import ipdb
+#import ipdb
...
@@ -85,7 +85,9 @@ def generate_patches(isDebug=True):
     #ipdb.set_trace()
     print("number of clean training data {0} and noisy {1}".format(len(cleanmat), len(noisymat)))
     scales = 1 #and we do not touch it!!!! hparams.scales #old version [1, 0.9, 0.8, 0.7]
+    if hparams.patch_size > hparams.originalsize[0]:
+        sys.exit('patch size > size of original images')
     nb_image = len(cleanmat)
     nb_patch_per_image = int((hparams.originalsize[0] - hparams.patch_size) / hparams.stride + 1) * int((hparams.originalsize[1] - hparams.patch_size) / hparams.stride + 1)
     #(1024 - 50)/50 + 1 = 20 -> 20*20 = 400 patches per image
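The inline arithmetic in the comment can be checked directly, and the same formula gives the counts for the other database sizes mentioned in hparams.py; the 50/50 patch size and stride are assumed for illustration:

def patches_per_image(h, w, patch, stride):
    # same formula as nb_patch_per_image above
    return int((h - patch) / stride + 1) * int((w - patch) / stride + 1)

print(patches_per_image(1024, 1024, 50, 50))  # matlab database:   20 * 20 = 400
print(patches_per_image(128, 128, 50, 50))    # holodeep database:  2 * 2  = 4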
...
@@ -169,7 +171,7 @@ def generate_patches(isDebug=True):
     print('shape of inputs: ', cleaninputs.shape)
     print('amplitude of inputs: ', np.max(cleaninputs), np.min(cleaninputs))
-    sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type)
+    sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type, hparams.stride, hparams.patch_size, hparams.patch_per_image)
     if not os.path.exists(args.save_dir):
         os.mkdir(args.save_dir)
     np.save(os.path.join(args.save_dir, "img_clean_train_" + sess_name), cleaninputs)
...
hparams.py (view file @ a3e852e4)
...
@@ -35,20 +35,25 @@ __status__ = "Production"
 # Default hyperparameters:
 hparams = tf.contrib.training.HParams(
-    #noise_src_dir = '/lium/raid01_c/tahon/holography/HOLODEEP/',
-    noise_src_dir = '/lium/raid01_c/tahon/holography/NATURAL/noisy',
-    #clean_src_dir = '/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP/',
-    clean_src_dir = '/lium/raid01_c/tahon/holography/NATURAL/original',
+    #to train on HOLODEEP tiff images
+    noise_src_dir = '/lium/raid01_c/tahon/holography/HOLODEEP/',
+    clean_src_dir = '/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP/',
+    eval_dir = '/lium/raid01_c/tahon/holography/HOLODEEPmat/',
+    #to train on matlab images
+    #eval_dir = '/lium/raid01_c/tahon/holography/HOLODEEPmat/',
+    #to train on natural images
+    #noise_src_dir = '/lium/raid01_c/tahon/holography/NATURAL/noisy',
+    #clean_src_dir = '/lium/raid01_c/tahon/holography/NATURAL/original',
+    #eval_dir = '/lium/raid01_c/tahon/holography/HOLODEEPmat/',
     #test_dir = 'lium/raid01_c/tahon/holography/TEST/',
-    phase = 'test', #train or test phase
+    phase = 'train', #train or test phase
     #image
     isDebug = False, #True, #create only 10 patches
-    originalsize = (180, 180), #1024 for matlab database, 128 for holodeep database
+    originalsize = (128, 128), #1024 for matlab database, 128 for holodeep database, 180 for natural images
     phase_type = 'two', #keep phase between -pi and pi (phi), convert into cosinus (cos) or sinus (sin)
     #select images for training
-    train_patterns = [1, 2, 3, 4, 5], #number of images from 1 to 5
-    train_noise = [0, 1, 1.5, 2, 2.5],
+    train_patterns = [1, 2, 3], #number of images from 1 to 5
+    train_noise = [0], #[0, 1, 1.5, 2, 2.5],
     #select images for evaluation (during training)
     eval_patterns = [1, 2, 3, 4, 5],
     eval_noise = [0, 1, 1.5, 2, 2.5],
...
main_holo.py (view file @ a3e852e4)
...
@@ -64,7 +64,9 @@ hparams.parse(args.params)
 def denoiser_train(denoiser, lr):
     #with load_data the images are already normalized by 255.0
-    sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type)
+    sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type, hparams.stride, hparams.patch_size, hparams.patch_per_image)
+    #for training with natural images
+    #sess_name = 'natural_phi'
     print('session name: ', sess_name)
     train_data = load_train_data(filepath = args.save_dir + 'img_clean_train_' + sess_name + '.npy', noisyfilepath = args.save_dir + 'img_noisy_train_' + sess_name + '.npy', phase_type = hparams.phase_type)
     # if memory is limited, comment this line and uncomment line 99 in model.py
...
@@ -77,15 +79,15 @@ def denoiser_train(denoiser, lr):
     eval_data = load_eval_data(hparams.eval_dir, hparams.eval_noise, hparams.eval_patterns)
     print('train data shape:', train_data[0].shape, type(train_data))
     print('eval data shape:', eval_data[0][0].shape, type(eval_data))
-    denoiser.train(train_data, eval_data, batch_size=hparams.batch_size, ckpt_dir=args.ckpt_dir, epoch=hparams.epoch, lr=lr, sample_dir=args.sample_dir, phase_type=hparams.phase_type)
+    denoiser.train(train_data, eval_data, batch_size=hparams.batch_size, ckpt_dir=args.ckpt_dir, epoch=hparams.epoch, lr=lr, sample_dir=args.sample_dir, phase_type=hparams.phase_type, nb_layers=hparams.nb_layers)

 def denoiser_test(denoiser):
-    #noisy = load_test_data(args.noisy_img, key = 'NoisyPhase', flipupdown = args.flip)
-    noisy = load_test_data(args.noisy_img, key = 'Phaseb', flipupdown = args.flip) #for vibPhase
+    noisy = load_test_data(args.noisy_img, key = 'NoisyPhase', flipupdown = args.flip)
+    #noisy = load_test_data(args.noisy_img, key = 'Phaseb', flipupdown = args.flip) #for vibPhase
     print('load noisy ref')
     if args.clean_img:
         print('load clean ref')
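Based on the argument names visible in this file (args.params, args.noisy_img, args.clean_img, args.flip), a test run presumably looks like the sketch below; the exact flag spellings and file paths are assumptions, not taken from the repository:

# hypothetical invocation of the test phase on a .mat phase map
python main_holo.py --params "phase=test" --noisy_img /path/to/NoisyPhase.mat --clean_img /path/to/CleanPhase.mat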
...
model.py (view file @ a3e852e4)
...
@@ -21,10 +21,10 @@ def loss_function(ref, pred, phase_type):
     return tf.math.reduce_sum(ref - pred)

-def dncnn(input, is_training=True, output_channels=1):
+def dncnn(input, nb_layers=16, is_training=True, output_channels=1):
     with tf.variable_scope('block1'):
         output = tf.layers.conv2d(input, 64, 3, padding='same', activation=tf.nn.relu) #careful: this was tf.nn.relu
-    for layers in range(2, hparams.nb_layers + 1): #4 + 1): #16 + 1
+    for layers in range(2, nb_layers + 1): #4 + 1): #16 + 1
         with tf.variable_scope('block%d' % layers):
             output = tf.layers.conv2d(output, 64, 3, padding='same', name='conv%d' % layers, use_bias=False)
             output = tf.nn.relu(tf.layers.batch_normalization(output, training=is_training))
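Passing nb_layers explicitly decouples the network builder from the global hparams object, so graphs of different depths can be built side by side; for a stack of d 3x3 convolutions the receptive field grows as (2d+1)x(2d+1), which is what the depth parameter trades against. A minimal sketch with the new signature (TF 1.x graph mode; assumes model.py is importable from the repo root):

import tensorflow as tf
from model import dncnn

x = tf.placeholder(tf.float32, [None, None, None, 1], name='noisy_phase')
with tf.variable_scope('shallow'):
    y_dbg = dncnn(x, nb_layers=4, is_training=False)    # small net for quick checks (cf. the '#4 + 1' comment)
with tf.variable_scope('default'):
    y_full = dncnn(x, nb_layers=16, is_training=False)  # default depth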
...
@@ -39,10 +39,11 @@ def dncnn(input, is_training=True, output_channels=1):
 class denoiser(object):
-    def __init__(self, sess, phase_type='phi', noise_type='spkl', input_c_dim=1, sigma=25, batch_size=64):
+    def __init__(self, sess, phase_type='phi', noise_type='spkl', input_c_dim=1, sigma=25, batch_size=64, nb_layers=16):
         self.sess = sess
         self.input_c_dim = input_c_dim
         self.sigma = sigma
+        self.nb_layers = nb_layers
         # build model
         self.Y_ = tf.placeholder(tf.float32, [None, None, None, self.input_c_dim], name='clean_image')
         # tf.placeholder(dtype, shape= .., name = ..)
         self.is_training = tf.placeholder(tf.bool, name='is_training')
...
@@ -55,7 +56,7 @@ class denoiser(object):
         else:
             print('noise type does not exist')
             sys.exit()
-        self.Y = dncnn(self.X, is_training=self.is_training) #predict residual from noisy input
+        self.Y = dncnn(self.X, self.nb_layers, is_training=self.is_training) #predict residual from noisy input
         #self.loss = (1.0 / batch_size) * tf.nn.l2_loss(tf.clip_by_value(self.Y, -np.pi, np.pi) - self.Y_) #loss between clean ref and clean pred
         self.loss = (1.0 / batch_size) * tf.nn.l2_loss(self.Y - self.Y_) #loss between clean ref and clean pred
         #self.loss = (1.0 / batch_size) * loss_function(self.Y_, self.Y, phase_type)
...
@@ -138,8 +139,9 @@ class denoiser(object):
         clean_pred, psnr = self.sess.run([self.Y, self.eva_psnr], feed_dict={self.Y_: data_clean, self.X: data_noisy, self.is_training: False})
         return output_clean_image, noisy_image, psnr

-    def train(self, data, eval_data, batch_size, ckpt_dir, epoch, lr, sample_dir, phase_type, eval_every_epoch=5):
+    def train(self, data, eval_data, batch_size, ckpt_dir, epoch, lr, sample_dir, phase_type, nb_layers, eval_every_epoch=2):
+        phase_augmentation = True
         ckpt_dir_ = ckpt_dir
         sess_name = 'run-test' + str(datetime.now()).replace(' ', '_')
         ckpt_dir = ckpt_dir_ + '/' + sess_name + '/'
         sample_dir = sample_dir + '/' + sess_name + '/'
...
@@ -218,7 +220,7 @@ class denoiser(object):
             batch_images_noisy = data_noisy[batch_id * batch_size:(batch_id + 1) * batch_size, :, :, :]
             fres.write('max/min train: %.2f %.2f (clean), %.2f %.2f (noisy)\n' % (np.max(batch_images_clean), np.min(batch_images_clean), np.max(batch_images_noisy), np.min(batch_images_noisy)))
             # batch_images = batch_images.astype(np.float32) / 255.0 # normalize the data to 0-1
-            _, loss, summary = self.sess.run([self.train_op, self.loss, merged], feed_dict={self.X: batch_images_noisy, self.Y_: batch_images_clean, self.lr: lr[epoch], self.is_training: True})
+            _, loss, summary = self.sess.run([self.train_op, self.loss, merged], feed_dict={self.X: batch_images_noisy, self.Y_: batch_images_clean, self.lr: lr[epoch], self.nb_layers: nb_layers, self.is_training: True})
             fres.write("Epoch: [%2d] [%4d/%4d] time: %4.4f, loss: %.6f\n" % (epoch + 1, batch_id + 1, numBatch, time.time() - start_time, loss))
             print("Epoch: [%2d] [%4d/%4d] time: %4.4f, loss: %.6f" % (epoch + 1, batch_id + 1, numBatch, time.time() - start_time, loss))
             iter_num += 1
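One caveat: self.nb_layers is stored in __init__ as a plain Python int, not a tf.placeholder, and TensorFlow 1.x only accepts tensors (or tensor names) as feed_dict keys, so the added self.nb_layers: nb_layers entry would raise a TypeError on the first step. The depth is already fixed when dncnn(self.X, self.nb_layers, ...) builds the graph, so a sketch of the same call without that entry:

_, loss, summary = self.sess.run(
    [self.train_op, self.loss, merged],
    feed_dict={self.X: batch_images_noisy,
               self.Y_: batch_images_clean,
               self.lr: lr[epoch],
               self.is_training: True})  # depth is baked into the graph; nothing to feed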
...
utils.py (view file @ a3e852e4)
...
@@ -36,7 +36,7 @@ import numpy as np
 import tensorflow as tf
 from PIL import Image
 from scipy.io import loadmat, savemat
 from glob import glob
 #import ipdb

 __license__ = "LGPL"
...
@@ -49,8 +49,10 @@ __status__ = "Production"
-def extract_sess_name(lp, ln, pt):
-    return '-'.join(map(str, lp)) + '_' + '-'.join(map(str, ln)) + pt
+def extract_sess_name(lp, ln, pt, stride, ps, np):
+    #example of the call of the function:
+    #sess_name = extract_sess_name(hparams.train_patterns, hparams.train_noise, hparams.phase_type, hparams.stride, hparams.patch_size, hparams.patch_per_image)
+    return '-'.join(map(str, lp)) + '_' + '-'.join(map(str, ln)) + '_' + pt + '_' + str(stride) + '_' + str(ps) + '_' + str(np)
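With the extended signature the session name now also encodes stride, patch size, and patches per image (note that the last parameter is named np, which shadows the numpy import inside this function). An example of the resulting string; patterns, noise, and phase type come from the hparams.py defaults in this commit, while 50, 50, 384 are illustrative placeholders:

extract_sess_name([1, 2, 3], [0], 'two', 50, 50, 384)
# -> '1-2-3_0_two_50_50_384'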

 def get_files(path, regexp):
     list_files = []
...
@@ -63,6 +65,24 @@ def get_files(path, regexp):
         list_files.append(path.joinpath(name))
     return sorted(list_files)

+def from_NATURAL(dir_noise, dir_clean, path_only):
+    select_noisy = sorted(glob(dir_noise + '/*.png'))
+    select_clean = sorted(glob(dir_clean + '/*.png'))
+    if path_only:
+        #return only the filenames, not the images
+        return select_clean, select_noisy
+    else:
+        #return the images directly, not only the filenames
+        data_clean = []
+        for file in select_clean:
+            #ipdb.set_trace()
+            im = Image.open(file).convert('L')
+            data_clean.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))
+        data_noisy = []
+        for file in select_noisy:
+            im = Image.open(file).convert('L')
+            data_noisy.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))
+        return data_clean, data_noisy
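The reshape to (1, im.size[1], im.size[0], 1) is deliberate: PIL reports Image.size as (width, height), while np.array(im) is indexed (height, width), so the two axes must be swapped for the array to come out as (1, H, W, 1). A two-line check of the convention:

from PIL import Image
import numpy as np

im = Image.new('L', (640, 480))      # PIL size is (width, height)
print(im.size, np.array(im).shape)   # -> (640, 480) (480, 640)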

 def from_HOLODEEP(dir_noise, dir_clean, noise_eval, img_eval, path_only):
     pattern = {1: ('0', '1'), 2: ('0', '2'), 3: ('0', '3'), 4: ('73', '1'), 5: ('100', '1')}
...
@@ -77,12 +97,11 @@ def from_HOLODEEP(dir_noise, dir_clean, noise_eval, img_eval, path_only):
     #regExp = re.compile(r regExp)
     print(regExp)
-    if path_only:
-        select_noisy = get_files(pathlib.Path(dir_noise), regExp)
-        select_clean = get_files(pathlib.Path(dir_clean), regExp)
-        return select_clean, select_noisy
+    select_noisy = get_files(pathlib.Path(dir_noise), regExp)
+    select_clean = get_files(pathlib.Path(dir_clean), regExp)
+    print('selected noisy / clean files:', len(select_noisy), len(select_clean))
+    if path_only:
+        #return only the filenames, not the images
+        return select_clean, select_noisy
     #from load_images
     #pixel value range 0-255
...
@@ -92,18 +111,17 @@ def from_HOLODEEP(dir_noise, dir_clean, noise_eval, img_eval, path_only):
     #data = np.array(im).reshape(1, im.size[1], im.size[0], 1)
     #return data
-    data_clean = []
-    for file in select_clean:
-        #ipdb.set_trace()
-        im = Image.open(file).convert('L')
-        data_clean.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))
-    data_noisy = []
-    for file in select_noisy:
-        im = Image.open(file).convert('L')
-        data_noisy.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))
-    return data_clean, data_noisy
-    # return select_noisy, select_clean
+    else:
+        #return the images directly, not only the filenames
+        data_clean = []
+        for file in select_clean:
+            #ipdb.set_trace()
+            im = Image.open(file).convert('L')
+            data_clean.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))
+        data_noisy = []
+        for file in select_noisy:
+            im = Image.open(file).convert('L')
+            data_noisy.append(np.array(im).reshape(1, im.size[1], im.size[0], 1))
+        return data_clean, data_noisy

 def from_DATABASE(dir_data, noise_eval, img_eval, flipupdown=False):
     select_noisy = []
...