Commit 26de6b23 authored by Laanait, Nouamane

removing hard-coded parameters from ynet architecture generation

parent 8fa8b888
Pipeline #80213 failed in 2 minutes and 34 seconds
@@ -108,8 +108,8 @@ def calc_loss(n_net, scope, hyper_params, params, labels, step=None, images=None
     #Assemble all of the losses.
     losses = tf.get_collection(tf.GraphKeys.LOSSES)
     if hyper_params['network_type'] == 'YNet':
-        #losses = [inverter_loss , decoder_loss_re, decoder_loss_im, 0.01 * reg_loss]
-        losses = [inverter_loss , decoder_loss_re, decoder_loss_im]
+        losses = [inverter_loss , decoder_loss_re, decoder_loss_im, reg_loss]
+        # losses = [inverter_loss , decoder_loss_re, decoder_loss_im]
         # losses, prefac = ynet_adjusted_losses(losses, step)
         # tf.summary.scalar("prefac_inverter", prefac)
         # losses = [inverter_loss]
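Review note: with this change the regularization term re-enters the YNet loss list at full weight (the previously commented-out variant scaled it by 0.01). Below is a minimal sketch of how such a loss list is typically collapsed into a single training objective in TF 1.x; the function name `assemble_total_loss` and the `tf.add_n` reduction are assumptions for illustration, not code from this repository.

```python
import tensorflow as tf  # TF 1.x graph-mode API

def assemble_total_loss(losses):
    """Sketch: sum the per-term losses collected by calc_loss into one scalar.

    `losses` stands in for [inverter_loss, decoder_loss_re,
    decoder_loss_im, reg_loss]; the function name is hypothetical.
    """
    total_loss = tf.add_n(losses, name='total_loss')
    tf.summary.scalar('total_loss', total_loss)  # optional monitoring
    return total_loss
```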
@@ -611,7 +611,7 @@ class ConvNet:
         input= tf.cast(input, tf.float32)
         # with tf.variable_scope('layer_normalization', reuse=None) as scope:
         #     output = tf.keras.layers.LayerNormalization(trainable=False)(inputs=input)
-        mean , variance = tf.nn.moments(input, axes=[2,3], keepdims=True)
+        mean , variance = tf.nn.moments(input, axes=[2,3], keep_dims=True)
         output = (input - mean)/ (tf.sqrt(variance) + 1e-7)
         if self.params['IMAGE_FP16']:
             output = tf.cast(output, tf.float16)
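Side note: `keep_dims` is the keyword accepted by `tf.nn.moments` in the older TF 1.x releases this code appears to target (later releases renamed it `keepdims`), which is presumably why the call is switched back. A standalone sketch of the same per-sample standardization over the spatial axes; the function name and the NCHW layout assumption are mine.

```python
import tensorflow as tf  # TF 1.x API

def standardize_nchw(x):
    """Sketch: zero-mean / unit-variance over the spatial axes (NCHW assumed)."""
    x = tf.cast(x, tf.float32)
    # keep_dims=True retains the reduced axes so the subtraction broadcasts
    mean, variance = tf.nn.moments(x, axes=[2, 3], keep_dims=True)
    return (x - mean) / (tf.sqrt(variance) + 1e-7)
```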
@@ -2861,7 +2861,7 @@ class YNet(FCDenseNet, FCNet):
         # out = tf.transpose(out, perm= [1, 2, 0])
         dim = int(math.sqrt(self.images.shape.as_list()[1]))
         out = tf.reshape(out, [self.params['batch_size'], -1, dim, dim])
         self.print_rank('decoder reshape:', out.shape.as_list())
         with tf.variable_scope('%s_conv_1by1' % subnet, reuse=self.reuse) as scope:
             out, _ = self._conv(input=out, params=conv_1by1)
             do_bn = conv_1by1.get('batch_norm', False)
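Note on the reshape above: the decoder output is folded back into a square spatial grid whose side is the square root of the flattened image dimension, with the channel count inferred. A toy shape check under assumed sizes (batch of 4, 65536-pixel flattened images); the numbers are illustrative only.

```python
import math
import tensorflow as tf  # TF 1.x API

batch_size, flat_dim = 4, 65536          # assumed toy sizes
dim = int(math.sqrt(flat_dim))           # 256
out = tf.zeros([batch_size, flat_dim])   # stand-in for the decoder output
out = tf.reshape(out, [batch_size, -1, dim, dim])
print(out.shape.as_list())               # [4, 1, 256, 256]
```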
@@ -403,7 +403,6 @@ def generate_YNet_json(save= True, out_dir='json_files', n_pool=3, n_layers_per_
     fc_cvae = OrderedDict({'type': 'fully_connected','weights': fc_dim,'bias': fc_dim, 'activation': activation,
                            'regularize': True})
     cvae_model = OrderedDict({'n_conv_layers': 4, 'n_fc_layers':fc_layers,'fc_params': fc_cvae, 'conv_params':conv_cvae})
-    init_features = 1024
     freq2space_block = OrderedDict({'type': 'freq2space', 'activation': activation, 'dropout': dropout_prob,
                                     'init_features':init_features, 'batch_norm': batch_norm})
     freq2space_block['type'] = 'freq2space_CVAE'
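Review note: the hard-coded `init_features = 1024` disappears here, and the hunks below switch the decoder branches to `features = init_features`, so `init_features` presumably becomes an argument of `generate_YNet_json` (that signature change is outside the shown hunks). A sketch of how such a parameter could drive the deconvolution stack; the helper name and the halving schedule are assumptions.

```python
from collections import OrderedDict
from copy import deepcopy

def build_deconv_stack(n_pool=3, init_features=1024):
    """Sketch: derive per-stage deconv feature counts from init_features."""
    deconv_layer_base = OrderedDict({'type': 'deconv_2D', 'stride': [2, 2],
                                     'kernel': [4, 4], 'features': None,
                                     'padding': 'SAME', 'upsample': 2})
    layers = []
    features = init_features
    for i in range(n_pool + 1):
        deconv_layer = deepcopy(deconv_layer_base)
        deconv_layer['features'] = features
        layers.append(deconv_layer)
        features //= 2  # assumed: halve features at each upsampling stage
    return layers
```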
@@ -423,7 +422,7 @@ def generate_YNet_json(save= True, out_dir='json_files', n_pool=3, n_layers_per_
                               'activation': activation, 'padding': 'SAME', 'batch_norm': batch_norm, 'dropout':dropout_prob})
     deconv_layer_base = OrderedDict({'type': "deconv_2D", 'stride': [2, 2], 'kernel': [4,4], 'features': None,
                                      'padding': 'SAME', 'upsample': pool['kernel'][0]})
-    features = 1024
+    features = init_features
     rank = 0
     for i in range(n_pool+1):
         deconv_layer = deepcopy(deconv_layer_base)
@@ -449,7 +448,7 @@ def generate_YNet_json(save= True, out_dir='json_files', n_pool=3, n_layers_per_
     #############################
     layers_params_list = []
     layers_keys_list = []
-    features = 1024
+    features = init_features
     rank = 0
     for i in range(n_pool+1):
         deconv_layer = deepcopy(deconv_layer_base)
@@ -483,7 +482,7 @@ def generate_YNet_json(save= True, out_dir='json_files', n_pool=3, n_layers_per_
                               'activation': activation, 'padding': 'SAME', 'batch_norm': batch_norm, 'dropout':dropout_prob})
     deconv_layer_base = OrderedDict({'type': "deconv_2D", 'stride': [2, 2], 'kernel': [4,4], 'features': None,
                                      'padding': 'SAME', 'upsample': pool['kernel'][0]})
-    features = 1024
+    features = init_features
     rank = 0
     for i in range(n_pool+1):
         deconv_layer = deepcopy(deconv_layer_base)
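Usage note: since `generate_YNet_json` takes `save= True` and `out_dir='json_files'`, the assembled OrderedDicts are presumably serialized to JSON on disk. A minimal sketch of that kind of dump, with a hypothetical file name and payload; the actual format written by the function may differ.

```python
import json
import os
from collections import OrderedDict

out_dir = 'json_files'
os.makedirs(out_dir, exist_ok=True)

# Hypothetical payload: an ordered mapping of layer names to layer parameters.
layers = OrderedDict([('deconv_1', {'type': 'deconv_2D', 'features': 1024}),
                      ('deconv_2', {'type': 'deconv_2D', 'features': 512})])

with open(os.path.join(out_dir, 'ynet_example.json'), 'w') as f:
    json.dump(layers, f, indent=2)
```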