Commit 9091df9a authored by Unknown

More PEP8 changes

parent 831cbad2
......@@ -503,7 +503,7 @@ def remove_duplicate_labels(atom_labels, psf_width, double_cropped_image, distan
axis.imshow(double_cropped_image, interpolation='none', cmap="gray")
axis.scatter(all_atom_pos[culprits[:, 0], 1], all_atom_pos[culprits[:, 0], 0], color='yellow')
axis.scatter(all_atom_pos[culprits[:, 1], 1], all_atom_pos[culprits[:, 1], 0], color='red')
axis.scatter(all_atom_pos[good_atom_inds, 1], all_atom_pos[good_atom_inds, 0], color='cyan');
axis.scatter(all_atom_pos[good_atom_inds, 1], all_atom_pos[good_atom_inds, 0], color='cyan')
# Now classify the culprit pairs into the correct family
classifier = KNeighborsClassifier(n_neighbors=num_neighbors)
......@@ -534,9 +534,9 @@ def remove_duplicate_labels(atom_labels, psf_width, double_cropped_image, distan
row_ind = int(np.round(all_atom_pos[atom_ind, 0]))
col_ind = int(np.round(all_atom_pos[atom_ind, 1]))
img_section = double_cropped_image[max(0, row_ind - neighbor_size):
min(double_cropped_image.shape[0], row_ind + neighbor_size),
max(0, col_ind - neighbor_size):
min(double_cropped_image.shape[1], col_ind + neighbor_size)]
min(double_cropped_image.shape[0], row_ind + neighbor_size),
max(0, col_ind - neighbor_size):
min(double_cropped_image.shape[1], col_ind + neighbor_size)]
amplitude_pair.append(np.max(img_section))
# print amplitude_pair
if amplitude_pair[0] > amplitude_pair[1]:
......
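For orientation, a minimal self-contained sketch of the classification step above, using the same scikit-learn KNeighborsClassifier the diff calls. The shapes and toy values are assumptions: all_atom_pos holds (row, col) positions, atom_labels the family of each atom, good_atom_inds the unambiguously labeled atoms, and culprits the duplicate pairs.

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

all_atom_pos = np.array([[0.0, 0.0], [10.0, 0.0], [0.0, 10.0], [5.0, 5.0]])
atom_labels = np.array([0, 0, 0, 1])      # family label per atom
good_atom_inds = np.array([0, 1, 3])      # atoms labeled unambiguously
culprits = np.array([[2, 3]])             # pairs flagged as duplicates

# Train on the trusted atoms, then predict a family for each culprit
classifier = KNeighborsClassifier(n_neighbors=2)
classifier.fit(all_atom_pos[good_atom_inds], atom_labels[good_atom_inds])
culprit_labels = classifier.predict(all_atom_pos[culprits.ravel()])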
......@@ -509,8 +509,7 @@ def calc_switching_coef_vec(loop_coef_vec, nuc_threshold):
switching_coef_vec['R-'] = loop_coef_vec[:, 0]
switching_coef_vec['Switchable Polarization'] = loop_coef_vec[:, 1]
switching_coef_vec['Work of Switching'] = np.abs(loop_coef_vec[:, 3] -
loop_coef_vec[:, 2]) * \
np.abs(loop_coef_vec[:, 1])
loop_coef_vec[:, 2]) * np.abs(loop_coef_vec[:, 1])
switching_coef_vec['Nucleation Bias 1'] = nuc_v01
switching_coef_vec['Nucleation Bias 2'] = nuc_v02
......@@ -612,7 +611,7 @@ def generate_guess(vdc, pr_vec, show_plots=False):
x_pt = find_intersection(outline_1[pair], outline_2[pair],
[geom_centroid[0], hull.min_bound[1]],
[geom_centroid[0], hull.max_bound[1]])
if type(x_pt) != type(None):
if x_pt is not None:
y_intersections.append(x_pt)
'''
......@@ -624,7 +623,7 @@ def generate_guess(vdc, pr_vec, show_plots=False):
x_pt = find_intersection(outline_1[pair], outline_2[pair],
[hull.min_bound[0], geom_centroid[1]],
[hull.max_bound[0], geom_centroid[1]])
if type(x_pt) != type(None):
if x_pt is not None:
x_intersections.append(x_pt)
'''
......
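A note on the guard being modernized in both hunks above: type(x_pt) != type(None) is true exactly when x_pt holds a value, so the idiomatic PEP8 spelling is the identity test with not.

x_pt = None  # e.g. no intersection found
if x_pt is not None:  # equivalent to type(x_pt) != type(None), but idiomatic
    print('intersection at', x_pt)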
......@@ -22,7 +22,7 @@ def SHOfunc(parms, w_vec):
Vector of frequency values
"""
return parms[0] * exp(1j * parms[3]) * parms[1] ** 2 / \
(w_vec ** 2 - 1j * w_vec * parms[1] / parms[2] - parms[1] ** 2)
(w_vec ** 2 - 1j * w_vec * parms[1] / parms[2] - parms[1] ** 2)
def SHOestimateGuess(w_vec, resp_vec, num_points=5):
......
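For reference, a self-contained usage sketch of SHOfunc. The parameter order [amplitude, resonance frequency, quality factor, phase] is inferred from the expression above, and the numbers are toy values only.

import numpy as np

def SHOfunc(parms, w_vec):
    # Complex SHO response; parms = [A, w0, Q, phi] (inferred order)
    return parms[0] * np.exp(1j * parms[3]) * parms[1] ** 2 / \
        (w_vec ** 2 - 1j * w_vec * parms[1] / parms[2] - parms[1] ** 2)

w_vec = np.linspace(300e3, 320e3, 87)   # frequency axis, toy values
guess = [1.0, 310e3, 150.0, 0.0]        # amplitude, w0, Q, phase
resp = SHOfunc(guess, w_vec)
print(np.abs(resp).max())               # ~ A * Q at resonance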
......@@ -77,7 +77,7 @@ class Node(object):
self.num_nodes = 1
if verbose:
print('Parent node:', str(name), 'has', str(self.num_nodes), 'children')
if all([len(self.children) > 0, type(value) == type(None), compute_mean]):
if all([len(self.children) > 0, value is None, compute_mean]):
resp = []
for child in children:
if verbose:
......
......@@ -191,7 +191,7 @@ def getIndicesforPlotGroup(h5_udvs_inds, ds_udvs, plt_grp_name):
# All UDVS steps that are NOT part of the plot group are empty cells in the table
# and hence assume a NaN value.
# getting the udvs step indices that belong to this plot group:
step_inds = np.where(np.isnan(udvs_col_data) == False)[0]
step_inds = np.where(~np.isnan(udvs_col_data))[0]
# Getting the values in that plot group that were non NAN
udvs_plt_grp_col = udvs_col_data[step_inds]
......
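The mask rewrite in this hunk is worth spelling out: `is False` tests object identity against the singleton False, so np.isnan(arr) is False evaluates to a single plain False for any array, and np.where would silently select nothing. The elementwise form negates the NaN mask with ~, as above.

import numpy as np

udvs_col_data = np.array([1.0, np.nan, 3.0, np.nan, 5.0])

# Identity test: evaluates to plain False, selects nothing
empty = np.where(np.isnan(udvs_col_data) is False)[0]

# Elementwise negation of the NaN mask: selects the non-NaN entries
step_inds = np.where(~np.isnan(udvs_col_data))[0]
print(empty.size, step_inds)  # 0 [0 2 4]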
......@@ -62,15 +62,19 @@ def get_all_main(parent, verbose=False):
main_list = list()
def __check(name, obj):
if verbose: print(name, obj)
if verbose:
print(name, obj)
if isinstance(obj, h5py.Dataset):
if verbose: print(name, 'is an HDF5 Dataset.')
if verbose:
print(name, 'is an HDF5 Dataset.')
ismain = checkIfMain(obj)
if ismain:
if verbose: print(name, 'is a `Main` dataset.')
if verbose:
print(name, 'is a `Main` dataset.')
main_list.append(obj)
if verbose: print('Checking the group {} for `Main` datasets.'.format(parent.name))
if verbose:
print('Checking the group {} for `Main` datasets.'.format(parent.name))
parent.visititems(__check)
return main_list
......
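get_all_main relies on h5py's visititems, which walks every object under the group and calls the supplied function with (name, object); returning None keeps the traversal going. A self-contained sketch of the same pattern (the file and dataset names here are illustrative, not from the diff):

import h5py
import numpy as np

with h5py.File('example.h5', 'w') as h5_f:
    h5_f.create_dataset('Measurement_000/Raw_Data', data=np.zeros((4, 4)))

    datasets = []

    def collect(name, obj):
        # visititems calls this for every group and dataset in the tree
        if isinstance(obj, h5py.Dataset):
            datasets.append(obj)
        return None  # returning anything else would stop the walk

    h5_f.visititems(collect)
    print([dset.name for dset in datasets])  # ['/Measurement_000/Raw_Data']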
......@@ -54,7 +54,7 @@ class BEPSndfTranslator(Translator):
Absolute path of the generated .h5 file
"""
## Read the parameter files
# Read the parameter files
if debug:
print('BEndfTranslator: Getting file paths')
......@@ -99,7 +99,8 @@ class BEPSndfTranslator(Translator):
# Remove the unused plot group columns before proceeding:
self.udvs_mat, self.udvs_labs, self.udvs_units = trimUDVS(self.udvs_mat, self.udvs_labs, self.udvs_units,
ignored_plt_grps)
if debug: print('BEndfTranslator: Read UDVS file')
if debug:
print('BEndfTranslator: Read UDVS file')
self.num_udvs_steps = self.udvs_mat.shape[0]
# This is absolutely crucial for reconstructing the data chronologically
......@@ -111,7 +112,8 @@ class BEPSndfTranslator(Translator):
self.__num_wave_types__ = len(unique_waves)
# print self.__num_wave_types__, 'different excitation waveforms in this experiment'
if debug: print('BEndfTranslator: Preparing to set up parsers')
if debug:
print('BEndfTranslator: Preparing to set up parsers')
# Preparing objects to parse the file(s)
parsers = self.__assemble_parsers()
......@@ -192,11 +194,11 @@ class BEPSndfTranslator(Translator):
for prsr in parsers:
wave_type = prsr.get_wave_type()
if self.parm_dict['VS_mode'] == 'AC modulation mode with time reversal' and \
self.BE_bin_inds is not None:
if np.sign(wave_type) == -1:
bin_fft = self.BE_wave[self.BE_bin_inds]
elif np.sign(wave_type) == 1:
bin_fft = self.BE_wave_rev[self.BE_bin_inds]
self.BE_bin_inds is not None:
if np.sign(wave_type) == -1:
bin_fft = self.BE_wave[self.BE_bin_inds]
elif np.sign(wave_type) == 1:
bin_fft = self.BE_wave_rev[self.BE_bin_inds]
else:
bin_fft = None
......@@ -253,8 +255,12 @@ class BEPSndfTranslator(Translator):
pos_slice_dict = dict()
for spat_ind, spat_dim in enumerate(self.pos_labels):
pos_slice_dict[spat_dim] = (slice(None), slice(spat_ind, spat_ind + 1))
ds_pos_ind = MicroDataset('Position_Indices', self.pos_mat[self.ds_pixel_start_indx:
self.ds_pixel_start_indx + self.ds_pixel_index, :], dtype=np.uint)
ds_pos_ind = MicroDataset('Position_Indices',
self.pos_mat[self.ds_pixel_start_indx:self.ds_pixel_start_indx +
self.ds_pixel_index, :],
dtype=np.uint)
ds_pos_ind.attrs['labels'] = pos_slice_dict
ds_pos_ind.attrs['units'] = self.pos_units
......@@ -276,8 +282,8 @@ class BEPSndfTranslator(Translator):
# Z spectroscopy
self.pos_vals_list[:, 2] *= 1E+6 # convert to microns
pos_val_mat = np.float32(self.pos_mat[self.ds_pixel_start_indx:
self.ds_pixel_start_indx + self.ds_pixel_index, :])
pos_val_mat = np.float32(self.pos_mat[self.ds_pixel_start_indx:self.ds_pixel_start_indx +
self.ds_pixel_index, :])
for col_ind, targ_dim_name in enumerate(['X', 'Y', 'Z']):
if targ_dim_name in self.pos_labels:
......@@ -1012,10 +1018,11 @@ class BEPSndfPixel(object):
self.wave_modulation_type = data_mat1[2, 1] # this is the one with useful information
# print 'Pixel #',self.spatial_index,' Wave label: ',self.wave_label, ', Wave Type: ', self.wave_modulation_type
# First get the information from the columns:
fft_be_wave_real = data_mat1[s3:s3 - 0 + self.num_bins, 1] # real part of excitation waveform
fft_be_wave_imag = data_mat1[s3 + self.num_bins:s3 - 0 + spect_size1,
1] # imaginary part of excitation waveform
# First get the information from the columns:
# real part of excitation waveform
fft_be_wave_real = data_mat1[s3:s3 - 0 + self.num_bins, 1]
# imaginary part of excitation waveform
fft_be_wave_imag = data_mat1[s3 + self.num_bins:s3 - 0 + spect_size1, 1]
""" Though typecasting the combination of the real and imaginary data looks fine in HDFviewer and Spyder,
Labview sees such data as an array of clusters having 'r' and 'i' elements """
......@@ -1068,9 +1075,10 @@ class BEPSndfPixel(object):
self.laser_spot_pos_vec = data_mat1[s3 - 1, s4:] # NEVER used
# Actual data for this pixel:
spectrogram_real_mat = data_mat1[s3:s3 + self.num_bins, s4:] # real part of response spectrogram
spectrogram_imag_mat = data_mat1[s3 + self.num_bins:s3 + spect_size1,
s4:] # imaginary part of response spectrogram
# real part of response spectrogram
spectrogram_real_mat = data_mat1[s3:s3 + self.num_bins, s4:]
# imaginary part of response spectrogram
spectrogram_imag_mat = data_mat1[s3 + self.num_bins:s3 + spect_size1, s4:]
# Be consistent and ensure that the data is also stored as 64 bit complex as in the array creation
# complex part of response spectrogram
self.spectrogram_mat = np.complex64(spectrogram_real_mat + 1j * spectrogram_imag_mat)
......
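The final typecast is the crux of the LabVIEW note in the docstring: the real and imaginary planes are combined into a single complex64 block rather than typecast separately. In isolation, with toy data:

import numpy as np

spectrogram_real_mat = np.random.rand(3, 5).astype(np.float32)  # toy data
spectrogram_imag_mat = np.random.rand(3, 5).astype(np.float32)

# One 64-bit complex array (32-bit real + 32-bit imaginary per element)
spectrogram_mat = np.complex64(spectrogram_real_mat + 1j * spectrogram_imag_mat)
print(spectrogram_mat.dtype)  # complex64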
......@@ -30,6 +30,7 @@ verbose = False
# string instead of an array
treat_as_string_names = ['.*Name']
def get_from_file(f, stype):
# print("reading", stype, "size", struct.calcsize(stype))
src = f.read(struct.calcsize(stype))
......
......@@ -117,11 +117,11 @@ class GDMTranslator(Translator):
spm_data = MicroDataGroup('')
global_parms = generate_dummy_main_parms()
global_parms['grid_size_x'] = parm_dict['grid_num_cols'];
global_parms['grid_size_y'] = parm_dict['grid_num_rows'];
global_parms['grid_size_x'] = parm_dict['grid_num_cols']
global_parms['grid_size_y'] = parm_dict['grid_num_rows']
# assuming that the experiment was completed:
global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1;
global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1;
global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
global_parms['data_type'] = parm_dict['data_type'] # self.__class__.__name__
global_parms['translator'] = 'W2'
spm_data.attrs = global_parms
......
......@@ -98,11 +98,11 @@ class SporcTranslator(Translator):
ds_excit_wfm, ds_raw_data])
global_parms = generate_dummy_main_parms()
global_parms['grid_size_x'] = parm_dict['grid_num_cols'];
global_parms['grid_size_y'] = parm_dict['grid_num_rows'];
global_parms['grid_size_x'] = parm_dict['grid_num_cols']
global_parms['grid_size_y'] = parm_dict['grid_num_rows']
# assuming that the experiment was completed:
global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1;
global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1;
global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
global_parms['data_type'] = parm_dict['data_type']
global_parms['translator'] = 'SPORC'
......
......@@ -63,9 +63,8 @@ class Cluster(object):
self.data_slice = (slice(None), comp_slice)
# figure out the operation that needs to be performed to convert to real scalar
retval = check_dtype(h5_main)
self.data_transform_func, self.data_is_complex, self.data_is_compound, \
self.data_n_features, self.data_n_samples, self.data_type_mult = retval
(self.data_transform_func, self.data_is_complex, self.data_is_compound,
self.data_n_features, self.data_n_samples, self.data_type_mult) = check_dtype(h5_main)
def do_cluster(self, rearrange_clusters=True):
"""
......
......@@ -58,9 +58,8 @@ class Decomposition(object):
self.method_name = method_name
# figure out the operation that needs to be performed to convert to real scalar
retval = check_dtype(h5_main)
self.data_transform_func, self.data_is_complex, self.data_is_compound, \
self.data_n_features, self.data_n_samples, self.data_type_mult = retval
(self.data_transform_func, self.data_is_complex, self.data_is_compound,
self.data_n_features, self.data_n_samples, self.data_type_mult) = check_dtype(h5_main)
def doDecomposition(self):
"""
......
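Both Cluster and Decomposition now unpack check_dtype's six return values through a parenthesized target list, PEP8's preferred alternative to backslash continuations. The pattern in isolation, with a hypothetical stub standing in for pycroscopy's check_dtype:

def check_dtype_stub():
    # stand-in returning (transform_func, is_complex, is_compound,
    #                     n_features, n_samples, type_mult)
    return (abs, True, False, 128, 4096, 2)

(transform_func, is_complex, is_compound,
 n_features, n_samples, type_mult) = check_dtype_stub()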
......@@ -16,7 +16,7 @@ import skimage.feature
# TODO: Docstrings following numpy standard.
#### Functions
# Functions
def pickle_keypoints(keypoints):
"""
Function to pickle cv2.sift keypoint objects
......@@ -132,7 +132,7 @@ class FeatureExtractorParallel(object):
if mask:
def mask_func(x, winSize):
x[origin[0] - winSize / 2: origin[0] + winSize / 2,
origin[1] - winSize / 2: origin[1] + winSize / 2] = 2
origin[1] - winSize / 2: origin[1] + winSize / 2] = 2
x = x - 1
return x
......@@ -268,7 +268,7 @@ class FeatureExtractorSerial(object):
if mask:
def mask_func(x, winSize):
x[origin[0] - winSize / 2: origin[0] + winSize / 2,
origin[1] - winSize / 2: origin[1] + winSize / 2] = 2
origin[1] - winSize / 2: origin[1] + winSize / 2] = 2
x = x - 1
return x
......
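One caveat this PEP8 pass leaves untouched: in Python 3, winSize / 2 yields a float, and float slice bounds raise TypeError. If the masking code is meant to run under Python 3, the window arithmetic would need integer division. A hedged sketch, with origin passed explicitly here rather than closed over as in the diff:

import numpy as np

def mask_func(x, origin, winSize):
    # Integer division keeps the slice bounds valid under Python 3
    half = winSize // 2
    x[origin[0] - half: origin[0] + half,
      origin[1] - half: origin[1] + half] = 2
    return x - 1

img = np.zeros((8, 8), dtype=int)
print(mask_func(img, (4, 4), 4))  # 1s inside the window, -1s outside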
......@@ -330,9 +330,6 @@ def harmonicsPassFilter(num_pts, samp_rate, first_freq, band_width, num_harm, do
return harm_filter
###############################################################################
# def removeNoiseHarmonics(F_AI_vec,samp_rate,noise_combs):
# """
# Removes specified noise frequencies from the signal
......
......@@ -1284,7 +1284,7 @@ class ImageWindow(object):
for k in range(r_n - 1):
r1 = r_vec[k]
r2 = r_vec[k + 1]
r_ind = np.where((r_mat >= r1) & (r_mat <= r2) == True)
r_ind = np.where((r_mat >= r1) & (r_mat <= r2))
fimabs_max[k] = np.max(fimabs[r_ind])
r_vec = r_vec[:-1] + (r_max - r_min) / (r_n - 1.0) / 2.0
......@@ -1482,7 +1482,7 @@ def radially_average_correlation(data_mat, num_r_bin):
step = 1 / (num_r_bin * 1.0 - 1)
for k, r_bin in enumerate(np.linspace(0, 1, num_r_bin)):
b = np.where((r_vec < r_bin + step) * (r_vec > r_bin) == True)[0]
b = np.where((r_vec < r_bin + step) & (r_vec > r_bin))[0]
if b.size == 0:
a_rad_avg_vec[k] = np.nan
......
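On the masks in both hunks above: Python's and calls bool() on each operand, which raises ValueError ("The truth value of an array with more than one element is ambiguous") for numpy arrays. Elementwise masking needs the bitwise & (multiplying the boolean masks, as the original * form did, also works), and the trailing == True is redundant since the mask is already boolean.

import numpy as np

r_mat = np.array([0.5, 1.5, 2.5, 3.5])
r1, r2 = 1.0, 3.0

# (r_mat >= r1) and (r_mat <= r2) would raise ValueError here
r_ind = np.where((r_mat >= r1) & (r_mat <= r2))
print(r_ind[0])  # [1 2]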
......@@ -28,7 +28,7 @@ class ImageTransformation(object):
# TODO: Docstrings following numpy standard.
#### Functions
# Functions
def pickle_keypoints(keypoints):
"""
Function to pickle cv2.sift keypoint objects
......@@ -135,7 +135,7 @@ class FeatureExtractorParallel(object):
if mask:
def mask_func(x, winSize):
x[origin[0] - winSize / 2: origin[0] + winSize / 2,
origin[1] - winSize / 2: origin[1] + winSize / 2] = 2
origin[1] - winSize / 2: origin[1] + winSize / 2] = 2
x = x - 1
return x
......@@ -271,7 +271,7 @@ class FeatureExtractorSerial(object):
if mask:
def mask_func(x, winSize):
x[origin[0] - winSize / 2: origin[0] + winSize / 2,
origin[1] - winSize / 2: origin[1] + winSize / 2] = 2
origin[1] - winSize / 2: origin[1] + winSize / 2] = 2
x = x - 1
return x
......
......@@ -59,7 +59,8 @@ def buildHistogram(x_hist, data_mat, N_x_bins, N_y_bins, weighting_vec=1, min_re
min_resp = np.min(y_hist)
if max_resp is None:
max_resp = np.max(y_hist)
if debug: print('min_resp', min_resp, 'max_resp', max_resp)
if debug:
print('min_resp', min_resp, 'max_resp', max_resp)
y_hist = __scale_and_discretize(y_hist, N_y_bins, max_resp, min_resp, debug)
......