Skip to content
GitLab
Menu
Projects
Groups
Snippets
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
Marie Tahon
DnCNN-tensorflow-holography
Commits
bda43a6f
Commit
bda43a6f
authored
Oct 22, 2019
by
Marie Tahon
Browse files
add nb of layers in params
parent
d415b988
Changes
3
Hide whitespace changes
Inline
Side-by-side
README.md
View file @
bda43a6f
...
...
@@ -34,6 +34,7 @@ Modifiable parameters are located in hparams.py
sigma = 25, #noise level for gaussian denoising; not used in this project
#Training
nb_layers = 4,#nb of intermediate convolutional layers (original number is 16)
batch_size = 64,#nb of patches per batch
patch_per_image = 350, # Silvio used 384 for 1024*1024 images
patch_size = 50, #size of training images.
...
...
hparams.py
View file @
bda43a6f
...
...
@@ -36,15 +36,15 @@ __status__ = "Production"
# Default hyperparameters:
hparams
=
tf
.
contrib
.
training
.
HParams
(
#noise_src_dir = '/lium/raid01_c/tahon/holography/HOLODEEP/',
noise_src_dir
=
'/lium/raid01_c/tahon/holography/NATURAL/'
,
noise_src_dir
=
'/lium/raid01_c/tahon/holography/NATURAL/
noisy
'
,
#clean_src_dir = '/lium/raid01_c/tahon/holography/NOISEFREEHOLODEEP/',
clean_src_dir
=
'/lium/raid01_c/tahon/holography/N
OISEFREENATURAL/
'
,
clean_src_dir
=
'/lium/raid01_c/tahon/holography/N
ATURAL/original
'
,
eval_dir
=
'/lium/raid01_c/tahon/holography/HOLODEEPmat/'
,
#test_dir = 'lium/raid01_c/tahon/holography/TEST/',
phase
=
't
rain
'
,
#train or test phase
phase
=
't
est
'
,
#train or test phase
#image
isDebug
=
False
,
#True, #create only 10 patches
originalsize
=
(
1
024
,
1024
),
#1024 for matlab database, 128 for holodeep database
originalsize
=
(
1
80
,
180
),
#1024 for matlab database, 128 for holodeep database
phase_type
=
'two'
,
#keep phase between -pi and pi (phi), convert into cosinus (cos) or sinus (sin)
#select images for training
train_patterns
=
[
1
,
2
,
3
,
4
,
5
],
#number of images from 1 to 5
...
...
@@ -60,8 +60,9 @@ hparams = tf.contrib.training.HParams(
sigma
=
25
,
#noise level for gaussian denoising
#Training
nb_layers
=
4
,
#original number is 16
batch_size
=
64
,
#128
patch_per_image
=
384
,
# Silvio used 384 for 1024*1024 images
patch_per_image
=
9
,
#9 for 180*180 images (NATURAL); Silvio used 384 for 1024*1024 images (MATLAB)
patch_size
=
50
,
#Silvio used 50.
epoch
=
2000
,
#2000
lr
=
0.001
,
# learning rate
...
...
model.py
View file @
bda43a6f
...
...
@@ -24,7 +24,7 @@ def loss_function(ref, pred, phase_type):
def
dncnn
(
input
,
is_training
=
True
,
output_channels
=
1
):
with
tf
.
variable_scope
(
'block1'
):
output
=
tf
.
layers
.
conv2d
(
input
,
64
,
3
,
padding
=
'same'
,
activation
=
tf
.
nn
.
relu
)
#note: this was previously tf.nn.relu
for
layers
in
range
(
2
,
4
+
1
):
#4 + 1): #16 + 1
for
layers
in
range
(
2
,
hparams
.
nb_layers
+
1
):
#4 + 1): #16 + 1
with
tf
.
variable_scope
(
'block%d'
%
layers
):
output
=
tf
.
layers
.
conv2d
(
output
,
64
,
3
,
padding
=
'same'
,
name
=
'conv%d'
%
layers
,
use_bias
=
False
)
output
=
tf
.
nn
.
relu
(
tf
.
layers
.
batch_normalization
(
output
,
training
=
is_training
))
...
...
@@ -138,10 +138,10 @@ class denoiser(object):
clean_pred
,
psnr
=
self
.
sess
.
run
([
self
.
Y
,
self
.
eva_psnr
],
feed_dict
=
{
self
.
Y_
:
data_clean
,
self
.
X
:
data_noisy
,
self
.
is_training
:
False
})
return
output_clean_image
,
noisy_image
,
psnr
def
train
(
self
,
data
,
eval_data
,
batch_size
,
ckpt_dir
,
epoch
,
lr
,
sample_dir
,
phase_type
,
eval_every_epoch
=
5
):
def
train
(
self
,
data
,
eval_data
,
batch_size
,
ckpt_dir
_
,
epoch
,
lr
,
sample_dir
,
phase_type
,
eval_every_epoch
=
5
):
phase_augmentation
=
True
sess_name
=
'run-test'
+
str
(
datetime
.
now
()).
replace
(
' '
,
'_'
)
ckpt_dir
=
ckpt_dir
+
'/'
+
sess_name
+
'/'
ckpt_dir
=
ckpt_dir
_
+
'/'
+
sess_name
+
'/'
sample_dir
=
sample_dir
+
'/'
+
sess_name
+
'/'
if
not
os
.
path
.
exists
(
ckpt_dir
):
os
.
makedirs
(
ckpt_dir
)
...
...
@@ -179,7 +179,7 @@ class denoiser(object):
fres
.
write
(
'Nb of batches is :%d
\n
'
%
(
numBatch
))
print
(
'Nb of batches is :'
,
numBatch
)
# load pretrained model
load_model_status
,
global_step
=
self
.
load
(
ckpt_dir
)
load_model_status
,
global_step
=
self
.
load
(
ckpt_dir
_
)
if
load_model_status
:
iter_num
=
global_step
start_epoch
=
global_step
//
numBatch
...
...
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment