Commit ef675861 authored by Laanait, Nouamane

bug fixes and removing final model ckpt saving (tf doesn't flush storage)

parent 43a4abcd
@@ -628,9 +628,9 @@ class DatasetLMDB(DatasetTFRecords):
         return glimpse_batch

     def random_crop_resize(self, images):
-        x_down = tf.random_uniform([self.params['batch_size']], minval=0., maxval=0.1)
+        x_down = tf.random_uniform([self.params['batch_size']], minval=0., maxval=0.05)
         x_up = 1 - x_down
-        offset = tf.random_uniform([self.params['batch_size']], minval=0., maxval=0.1)
+        offset = tf.random_uniform([self.params['batch_size']], minval=0., maxval=0.05)
         y_down = x_down + offset
         y_up = x_up + offset
         boxes = tf.stack([y_down, x_down, y_up, x_up], axis=1)
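> Annotation: the jitter range drops from 0.1 to 0.05, tightening the random crops. Boxes built as `[y1, x1, y2, x2]` in normalized coordinates are the format `tf.image.crop_and_resize` expects; below is a minimal sketch of how such boxes are typically consumed (TF 1.x, NHWC, static spatial dims assumed; the helper name is illustrative, not this repo's).

```python
import tensorflow as tf

def jittered_crop_resize(images, batch_size, max_jitter=0.05):
    # Per-image crop boxes in normalized [y1, x1, y2, x2] coordinates,
    # mirroring the jitter logic in the hunk above.
    x_down = tf.random_uniform([batch_size], minval=0., maxval=max_jitter)
    x_up = 1. - x_down
    offset = tf.random_uniform([batch_size], minval=0., maxval=max_jitter)
    boxes = tf.stack([x_down + offset, x_down, x_up + offset, x_up], axis=1)
    # Box i crops image box_ind[i] and is resampled back to crop_size.
    crop_size = images.shape.as_list()[1:3]
    return tf.image.crop_and_resize(images, boxes,
                                    box_ind=tf.range(batch_size),
                                    crop_size=crop_size)
```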
......
@@ -26,7 +26,6 @@ def _add_loss_summaries(total_loss, losses, summaries=False):
         loss_averages_op = tf.no_op(name='no_op')
     return loss_averages_op
-

 def calc_loss(n_net, scope, hyper_params, params, labels, step=None, images=None, summary=False):
     labels_shape = labels.get_shape().as_list()
     layer_params={'bias':labels_shape[-1], 'weights':labels_shape[-1],'regularize':True}
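> Annotation: the `summaries=False` branch above short-circuits to a no-op. When summaries are on, `_add_loss_summaries` in this style of TF 1.x code (following the classic CIFAR-10 tutorial pattern) usually maintains an exponential moving average of each loss. A hedged sketch of that pattern, assumed rather than copied from this repo:

```python
import tensorflow as tf

def _add_loss_summaries_sketch(total_loss, losses, summaries=False):
    if not summaries:
        return tf.no_op(name='no_op')
    # Smooth each loss with an exponential moving average for readable curves.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    for l in losses + [total_loss]:
        tf.summary.scalar(l.op.name + ' (raw)', l)       # instantaneous value
        tf.summary.scalar(l.op.name, loss_averages.average(l))  # smoothed
    return loss_averages_op
```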
@@ -89,7 +88,7 @@ def calc_loss(n_net, scope, hyper_params, params, labels, step=None, images=None, summary=False):
         psi_out_mod = thin_object(probe_re, probe_im, pot)
         reg_loss = 10 * calculate_loss_regressor(psi_out_mod, tf.reduce_mean(images, axis=[1], keepdims=True),
                                                  params, hyper_params, weight=weight)
-        tf.summary.scalar(' loss ', reg_loss)
+        tf.summary.scalar('reg_loss ', reg_loss)
         tf.summary.scalar('Inverter loss ', inverter_loss)
         tf.summary.scalar('Decoder loss (IM)', decoder_loss_im)
         tf.summary.scalar('Decoder loss (RE)', decoder_loss_re)
@@ -121,7 +120,6 @@ def calc_loss(n_net, scope, hyper_params, params, labels, step=None, images=None, summary=False):
     loss_averages_op = _add_loss_summaries(total_loss, losses, summaries=summary)
     return total_loss, loss_averages_op
-

 def fully_connected(n_net, layer_params, batch_size, wd=0, name=None, reuse=None):
     input = tf.cast(tf.reshape(n_net.model_output,[batch_size, -1]), tf.float32)
     dim_input = input.shape[1].value
@@ -144,7 +142,6 @@ def fully_connected(n_net, layer_params, batch_size, wd=0, name=None, reuse=None):
     n_net.scopes.append(output_scope)
     return output
-

 def calculate_loss_classifier(net_output, labels, params, hyper_params, summary=False):
     """
     Calculate the loss objective for classification
@@ -165,7 +162,6 @@ def calculate_loss_classifier(net_output, labels, params, hyper_params, summary=False):
     tf.add_to_collection(tf.GraphKeys.LOSSES, cross_entropy_mean)
     return cross_entropy_mean
-

 def calculate_loss_regressor(net_output, labels, params, hyper_params, weight=None, summary=False, global_step=None):
     """
     Calculate the loss objective for regression
@@ -220,7 +216,6 @@ def calculate_loss_regressor(net_output, labels, params, hyper_params, weight=None, summary=False, global_step=None):
         cost = tf.losses.log_loss(labels, weights=weight, predictions=net_output, reduction=tf.losses.Reduction.MEAN)
     return cost
-

 def ynet_adjusted_losses(losses, global_step):
     '''
     Schedule the different loss components based on global training step
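> Annotation: the regressor branch above delegates to `tf.losses.log_loss`, which computes the (optionally element-weighted) mean of `-(y*log(p) + (1-y)*log(1-p))`. A tiny TF 1.x check with made-up values:

```python
import tensorflow as tf

labels = tf.constant([1., 0.])
preds = tf.constant([0.9, 0.2])
loss = tf.losses.log_loss(labels, predictions=preds,
                          reduction=tf.losses.Reduction.MEAN)
with tf.Session() as sess:
    print(sess.run(loss))  # ~0.164 == mean(-log(0.9), -log(0.8))
```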
@@ -258,18 +253,23 @@ def fftshift(tensor, tens_format='NCHW'):

 def thin_object(psi_k_re, psi_k_im, potential):
     mask = np.zeros(psi_k_re.shape.as_list(), dtype=np.float32)
-<<<<<<< HEAD
-    ratio = 0.33
-    center = slice(int(ratio * mask.shape[-1]), int((1-ratio)* mask.shape[-1]))
-=======
-    center = slice(mask.shape[-1]//5, 4 * mask.shape[-1]//5)
->>>>>>> 7481c25a03030a0f934ca37dc0700aab3cc9409f
+    ratio = 0
+    if ratio == 0:
+        center = slice(None, None)
+    else:
+        center = slice(int(ratio * mask.shape[-1]), int((1-ratio)* mask.shape[-1]))
     mask[:,:,center,center] = 1.
     mask = tf.constant(mask, dtype=tf.complex64)
     psi_x = fftshift(tf.ifft2d(mask * tf.cast(psi_k_re, tf.complex64) * tf.exp( 1.j * tf.cast(psi_k_im, tf.complex64))))
+    scan_range = psi_x.shape.as_list()[-1]//2
+    vx, vy = np.linspace(-scan_range, scan_range, num=4), np.linspace(-scan_range, scan_range, num=4)
+    X, Y = np.meshgrid(vx.astype(np.int), vy.astype(np.int))
+    psi_x_stack = [tf.roll(psi_x, shift=[x,y], axis=[1,2]) for (x,y) in zip(X.flatten(), Y.flatten())]
+    psi_x_stack = tf.concat(psi_x_stack, axis=1)
     pot_frac = tf.exp(1.j * tf.cast(potential, tf.complex64))
-    psi_out = tf.fft2d(mask * psi_x * pot_frac / np.prod(psi_x.shape.as_list()))
+    psi_out = tf.fft2d(mask * psi_x_stack * pot_frac / np.prod(psi_x.shape.as_list()))
     psi_out_mod = tf.cast(tf.abs(psi_out), tf.float32) ** 2
-    tf.summary.image('Psi_k_out', tf.transpose(psi_out_mod, perm=[0,2,3,1]), max_outputs=1)
+    psi_out_mod = tf.reduce_mean(psi_out_mod, axis=1, keep_dims=True)
+    tf.summary.image('Psi_k_out', tf.transpose(tf.abs(psi_out_mod)**0.25, perm=[0,2,3,1]), max_outputs=1)
     tf.summary.image('Psi_x_in', tf.transpose(tf.abs(psi_x)**0.25, perm=[0,2,3,1]), max_outputs=1)
     return psi_out_mod
\ No newline at end of file
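> Annotation: the rewritten `thin_object` resolves the leftover merge conflict (the k-space mask is now disabled when `ratio == 0`) and, more substantively, simulates a 4x4 grid of probe positions by circularly shifting `psi_x` with `tf.roll` and averaging the resulting diffraction intensities over the stacked channel axis. The underlying forward model is the multiplicative thin-object approximation, `psi_out(k) = F[psi(x) * exp(i V(x))]`. A stripped-down, self-contained sketch of that model (names are illustrative; NCHW complex64 tensors assumed, as above):

```python
import tensorflow as tf

def thin_object_intensity(psi_x, potential):
    # Exit wave: probe times the thin-object transmission function exp(iV).
    transmission = tf.exp(1.j * tf.cast(potential, tf.complex64))
    exit_wave = psi_x * transmission
    # Far-field diffraction: tf.fft2d transforms the innermost two
    # (spatial) axes; the detector records the squared modulus.
    psi_k = tf.fft2d(exit_wave)
    return tf.cast(tf.abs(psi_k), tf.float32) ** 2
```

Note that `tf.roll` shifts are circular, so probe positions near the window edge wrap around; with `scan_range` set to half the window this appears intentional, but it is worth keeping in mind when interpreting the averaged `psi_out_mod`.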
@@ -13,7 +13,6 @@ from copy import deepcopy
 import tensorflow as tf
 import numpy as np
 import horovod.tensorflow as hvd
 from .mp_wrapper import mp_regularizer_wrapper
-
 worker_name='model'
 tf.logging.set_verbosity(tf.logging.ERROR)
@@ -77,8 +76,6 @@ class ConvNet:
         self.model_output = None
         self.scopes = []
         # self.initializer = self._get_initializer(hyper_params.get('initializer', None))
-
-
     def print_rank(self, *args, **kwargs):
         if hvd.rank() == 0 and self.operation == 'train':
             print(*args, **kwargs)
@@ -170,7 +167,6 @@ class ConvNet:
         """
         pass
-
     # @staticmethod
     def _get_initializer(self, params):
         """
         Returns an Initializer object for initializing weights
@@ -241,7 +237,6 @@ class ConvNet:
             # self.print_verbose('Using default Xavier instead')
             return tf.contrib.layers.xavier_initializer()
-

     def get_loss(self):
         # with tf.variable_scope(self.scope, reuse=self.reuse) as scope:
         if self.net_type == 'hybrid': self._calculate_loss_hybrid()
@@ -2836,10 +2831,10 @@ class YNet(FCDenseNet, FCNet):
         params = self.network['encoder']['freq2space']
         fully_connected = params['cvae_params']['fc_params']
         num_fc = params['cvae_params']['n_fc_layers']
-        conv_1by1 = OrderedDict({'type': 'conv_2D', 'stride': [1, 1], 'kernel': [1, 1],
+        conv_1by1 = OrderedDict({'type': 'conv_2D', 'stride': [1, 1], 'kernel': [3, 3],
                                  'features': params['init_features'],
                                  'activation': "relu",
-                                 'padding': 'VALID',
+                                 'padding': 'SAME',
                                  'batch_norm': True, 'dropout':0})
         # def fc_map(tens):
         #     for i in range(num_fc):
@@ -2873,10 +2868,10 @@ class YNet(FCDenseNet, FCNet):
         params = self.network['encoder']['freq2space']
         fully_connected = params['cvae_params']['fc_params']
         num_fc = params['cvae_params']['n_fc_layers']
-        conv_1by1 = OrderedDict({'type': 'conv_2D', 'stride': [1, 1], 'kernel': [1, 1],
+        conv_1by1 = OrderedDict({'type': 'conv_2D', 'stride': [1, 1], 'kernel': [3, 3],
                                  'features': params['init_features'],
                                  'activation': "relu",
-                                 'padding': 'VALID',
+                                 'padding': 'SAME',
                                  'batch_norm': True, 'dropout':0})
         def fc_map(tens):
             for i in range(num_fc):
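> Annotation: both kernel changes above are shape-preserving: at stride 1, a 1x1 kernel with VALID padding and a 3x3 kernel with SAME padding leave H and W untouched, so the swap only widens the receptive field of the freq2space projection. A quick TF 1.x shape check (tensor sizes are made up):

```python
import tensorflow as tf

x = tf.zeros([8, 64, 64, 16])                          # NHWC input
y1 = tf.layers.conv2d(x, 32, [1, 1], padding='valid')  # 1x1, VALID
y2 = tf.layers.conv2d(x, 32, [3, 3], padding='same')   # 3x3, SAME
print(y1.shape, y2.shape)  # both (8, 64, 64, 32)
```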
......
@@ -421,7 +421,7 @@ def generate_YNet_json(save= True, out_dir='json_files', n_pool=3, n_layers_per_
     layers_keys_list = []
     conv_layer_base = OrderedDict({'type': conv_type, 'stride': [1, 1], 'kernel': kernel, 'features': None,
                                    'activation': 'relu', 'padding': 'SAME', 'batch_norm': batch_norm, 'dropout':dropout_prob})
-    deconv_layer_base = OrderedDict({'type': "deconv_2D", 'stride': [2, 2], 'kernel': [3,3], 'features': None,
+    deconv_layer_base = OrderedDict({'type': "deconv_2D", 'stride': [2, 2], 'kernel': [4,4], 'features': None,
                                      'padding': 'SAME', 'upsample': pool['kernel'][0]})
     features = 1024
     rank = 0
@@ -481,7 +481,7 @@ def generate_YNet_json(save= True, out_dir='json_files', n_pool=3, n_layers_per_
     conv_layer_base = OrderedDict({'type': conv_type, 'stride': [1, 1], 'kernel': kernel, 'features': None,
                                    'activation': 'relu', 'padding': 'SAME', 'batch_norm': batch_norm, 'dropout':dropout_prob})
-    deconv_layer_base = OrderedDict({'type': "deconv_2D", 'stride': [2, 2], 'kernel': [3,3], 'features': None,
+    deconv_layer_base = OrderedDict({'type': "deconv_2D", 'stride': [2, 2], 'kernel': [4,4], 'features': None,
                                      'padding': 'SAME', 'upsample': pool['kernel'][0]})
     features = 1024
     rank = 0
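> Annotation: the deconvolution kernel moves from 3x3 to 4x4 at stride [2, 2]. When the kernel size is divisible by the stride, every output pixel receives the same number of kernel contributions, which is the standard remedy for checkerboard artifacts in transposed convolutions (Odena et al., Distill 2016). A minimal TF 1.x illustration with placeholder sizes:

```python
import tensorflow as tf

x = tf.zeros([8, 32, 32, 64])  # NHWC feature map
up = tf.layers.conv2d_transpose(x, filters=32, kernel_size=[4, 4],
                                strides=[2, 2], padding='same')
print(up.shape)  # (8, 64, 64, 32): spatial dims doubled, even kernel overlap
```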
......
@@ -410,8 +410,6 @@ def train(network_config, hyper_params, params):
         if doValidate:
             validate(network_config, hyper_params, params, sess, dset)
         if doFinish:
-            saver.save(sess, checkpoint_file, global_step=train_elf.last_step)
-            print_rank('Saved Final Checkpoint.')
             return

 def train_inverter(network_config, hyper_params, params):
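> Annotation: per the commit message, the explicit final `saver.save` is dropped because TF does not reliably flush the checkpoint to storage at that point. With it gone, end-of-training coverage has to come from the periodic saving mechanism. One hedged way to get both timed and end-of-session checkpoints in TF 1.x is `tf.train.MonitoredTrainingSession`, whose built-in `CheckpointSaverHook` also saves when the session closes; the toy graph and paths below are placeholders, not this repo's:

```python
import tensorflow as tf

# Toy graph standing in for the real model.
global_step = tf.train.get_or_create_global_step()
loss = tf.reduce_mean(tf.square(tf.random_normal([4])))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
    loss, global_step=global_step)

hooks = [tf.train.StopAtStepHook(last_step=1000)]
with tf.train.MonitoredTrainingSession(checkpoint_dir='/tmp/ckpts',
                                       save_checkpoint_secs=600,
                                       hooks=hooks) as sess:
    while not sess.should_stop():
        sess.run(train_op)  # checkpoints are written on a timer and at close
```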
......