stemdl/losses.py (+9 −1)

@@ -93,6 +93,14 @@ def calc_loss(n_net, scope, hyper_params, params, labels, step=None, images=None
     # weight = tf.constant(mask)
     decoder_loss_im = calculate_loss_regressor(probe_im, probe_labels_im, params, hyper_params, weight=weight)
     decoder_loss_re = calculate_loss_regressor(probe_re, probe_labels_re, params, hyper_params, weight=weight)
+    # psi_comp = tf.fft2d(tf.cast(probe_re, tf.complex64) * tf.exp(1.j * tf.cast(probe_im, tf.complex64)))
+    # pot_frac = tf.exp(1.j * tf.cast(pot, tf.complex64))
+    # reg_term = tf.fft2d(psi_comp * pot_frac / np.prod(psi_comp.shape.as_list()))
+    # reg_term = tf.cast(tf.abs(reg_term), tf.float32)
+    # reg_loss = calculate_loss_regressor(reg_term, tf.reduce_mean(images, axis=[1], keepdims=True),
+    #                                     params, hyper_params, weight=weight)
+    # tf.summary.image('Regularization', tf.transpose(reg_term, perm=[0,2,3,1]), max_outputs=4)
+    # tf.summary.image('Pot_realspace', tf.transpose(tf.abs(psi_comp), perm=[0,2,3,1]), max_outputs=4)
     tf.summary.scalar('Inverter loss (raw)', inverter_loss)
     tf.summary.scalar('Decoder loss (IM)', decoder_loss_im)
     tf.summary.scalar('Decoder loss (RE)', decoder_loss_re)
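The commented-out block added here sketches a forward-model regularizer: build a complex probe wave from the decoder's two outputs (which the code treats as an amplitude and a phase, despite the re/im names), transmit it through the phase object exp(i·pot), and compare the far-field intensity against the mean input image. A minimal standalone sketch of what those lines appear to compute, assuming TF 1.x and float32 inputs with static shape [batch, 1, H, W]; the helper name forward_model_reg is illustrative, not part of stemdl:

    import numpy as np
    import tensorflow as tf

    def forward_model_reg(probe_re, probe_im, pot):
        # Complex probe wave (amplitude * exp(i * phase)), taken to the far field.
        psi_comp = tf.fft2d(tf.cast(probe_re, tf.complex64)
                            * tf.exp(1.j * tf.cast(probe_im, tf.complex64)))
        # Thin phase-object transmission function exp(i * potential).
        pot_frac = tf.exp(1.j * tf.cast(pot, tf.complex64))
        # Intensity of the transmitted wave, crudely normalized by tensor size.
        reg_term = tf.fft2d(psi_comp * pot_frac / np.prod(psi_comp.shape.as_list()))
        return tf.cast(tf.abs(reg_term), tf.float32)

Feeding the result to calculate_loss_regressor against tf.reduce_mean(images, axis=[1], keepdims=True), as the commented lines do, would penalize reconstructions whose simulated diffraction disagrees with the measured data.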
stemdl/network.py (+25 −25)

@@ -2854,18 +2854,18 @@ class YNet(FCDenseNet, FCNet):
                                      'activation': 'relu',
                                      'padding': 'VALID', 'batch_norm': True,
                                      'dropout': 0.0})
-        # if True:
-        #     def fc_map(tens):
-        #         for i in range(num_fc):
-        #             with tf.variable_scope('%s_fc_%d' % (subnet, i), reuse=self.reuse) as scope:
-        #                 tens = self._linear(input=tens, params=fully_connected)
-        #                 tens = self._activate(input=tens, params=fully_connected)
-        #                 # scopes_list.append(scope)
-        #         return tens
-        #     out = tf.map_fn(fc_map, out, back_prop=True)
-        #     out = tf.transpose(out, perm=[1, 2, 0])
-        #     dim = int(math.sqrt(self.images.shape.as_list()[1]))
-        #     out = tf.reshape(out, [self.params['batch_size'], -1, dim, dim])
+        if True:
+            def fc_map(tens):
+                for i in range(num_fc):
+                    with tf.variable_scope('%s_fc_%d' % (subnet, i), reuse=self.reuse) as scope:
+                        tens = self._linear(input=tens, params=fully_connected)
+                        tens = self._activate(input=tens, params=fully_connected)
+                        scopes_list.append(scope)
+                return tens
+            out = tf.map_fn(fc_map, out, back_prop=True)
+            out = tf.transpose(out, perm=[1, 2, 0])
+            dim = int(math.sqrt(self.images.shape.as_list()[1]))
+            out = tf.reshape(out, [self.params['batch_size'], -1, dim, dim])
         # else:
         #     out = tf.reshape(out, [out_shape[0]*out_shape[1], out_shape[2], out_shape[3], out_shape[4]])
         #     with tf.variable_scope('%s_conv_1by1_1' % subnet, reuse=self.reuse) as scope:

@@ -2898,7 +2898,7 @@ class YNet(FCDenseNet, FCNet):
         #         out = self._activate(input=out, params=conv_1by1)
         #         scopes_list.append(scope)
         # self.model_output[subnet] = out
-        # self.all_scopes[subnet] += scopes_list
+        self.all_scopes[subnet] += scopes_list

     def build_inverter(self):
         out = self.model_output['encoder']

@@ -2906,18 +2906,18 @@
         fully_connected = params['fc_params']
         num_fc = params['n_fc_layers']
         scopes_list = []
-        # if True:
-        #     def fc_map(tens):
-        #         for i in range(num_fc):
-        #             with tf.variable_scope('Inverter_fc_%d' % i, reuse=self.reuse) as scope:
-        #                 tens = self._linear(input=tens, params=fully_connected)
-        #                 tens = self._activate(input=tens, params=fully_connected)
-        #                 # scopes_list.append(scope)
-        #         return tens
-        #     out = tf.map_fn(fc_map, out, back_prop=True, swap_memory=True, parallel_iterations=256)
-        #     out = tf.transpose(out, perm=[1, 2, 0])
-        #     dim = int(math.sqrt(self.images.shape.as_list()[1]))
-        #     out = tf.reshape(out, [self.params['batch_size'], -1, dim, dim])
+        if True:
+            def fc_map(tens):
+                for i in range(num_fc):
+                    with tf.variable_scope('Inverter_fc_%d' % i, reuse=self.reuse) as scope:
+                        tens = self._linear(input=tens, params=fully_connected)
+                        tens = self._activate(input=tens, params=fully_connected)
+                        scopes_list.append(scope)
+                return tens
+            out = tf.map_fn(fc_map, out, back_prop=True, swap_memory=True, parallel_iterations=256)
+            out = tf.transpose(out, perm=[1, 2, 0])
+            dim = int(math.sqrt(self.images.shape.as_list()[1]))
+            out = tf.reshape(out, [self.params['batch_size'], -1, dim, dim])
         # else:
         #     conv_1by1_1 = OrderedDict({'type': 'conv_2D', 'stride': [1, 1], 'kernel': [1, 1],
         #                                'features': 1,
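Both the generic subnet builder ('%s_fc_%d') and build_inverter now route their output through the previously commented-out tf.map_fn branch instead of the 1×1-conv path: a shared stack of FC layers is mapped over the leading axis, then the result is transposed and reshaped back into a square image grid. A self-contained sketch of the pattern under assumed shapes ([n_slices, batch, features] input), with tf.layers.dense standing in for stemdl's _linear/_activate pair; every name here is illustrative, not the repo's API:

    import math
    import tensorflow as tf

    def apply_fc_stack(out, num_fc, units, batch_size, n_pixels):
        def fc_map(tens):
            for i in range(num_fc):
                with tf.variable_scope('Inverter_fc_%d' % i, reuse=tf.AUTO_REUSE):
                    tens = tf.layers.dense(tens, units, activation=tf.nn.relu)
            return tens
        # map_fn wraps fc_map in a while_loop over the leading axis; swap_memory
        # offloads loop intermediates to host RAM, and parallel_iterations caps
        # how many slices run concurrently.
        out = tf.map_fn(fc_map, out, back_prop=True, swap_memory=True,
                        parallel_iterations=256)
        out = tf.transpose(out, perm=[1, 2, 0])  # -> [batch, units, n_slices]
        dim = int(math.sqrt(n_pixels))           # side length of a square image
        # Assumes units * n_slices is divisible by dim * dim.
        return tf.reshape(out, [batch_size, -1, dim, dim])

Sharing one FC stack across slices via map_fn keeps the parameter count independent of the number of slices, at the cost of a sequential while_loop in the graph; the swap_memory/parallel_iterations settings appear only on the inverter's call in the diff.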
stemdl/runtime.py (+2 −0)

@@ -952,6 +952,8 @@ def validate_ckpt(network_config, hyper_params, params, num_batches=None,
     elif hyper_params['network_type'] == 'hybrid':
         pass
     elif hyper_params['network_type'] == 'inverter':
+        if labels.shape.as_list()[1] > 1:
+            labels, _, _ = [tf.expand_dims(itm, axis=1) for itm in tf.unstack(labels, axis=1)]
         loss_params = hyper_params['loss_function']
         if params['output']:
             output = tf.cast(n_net.model_output, tf.float32)
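These two added lines let validate_ckpt accept stacked label tensors for the 'inverter' network: when axis 1 carries more than one target (presumably the potential plus the two probe channels scored in losses.py above), only the first is kept. A shape-level sketch, with the [batch, 3, H, W] layout assumed for illustration:

    import tensorflow as tf

    labels = tf.placeholder(tf.float32, [4, 3, 64, 64])  # batch, targets, H, W
    if labels.shape.as_list()[1] > 1:
        # unstack yields three [4, 64, 64] tensors; expand_dims restores axis 1.
        labels, _, _ = [tf.expand_dims(itm, axis=1)
                        for itm in tf.unstack(labels, axis=1)]
    print(labels.shape)  # (4, 1, 64, 64)

Note that the triple unpacking hard-codes exactly three stacked targets; a label tensor with any other channel count above one would raise a ValueError at graph-construction time.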