Commit 18d3d29f authored by Unknown's avatar Unknown
Browse files

Formatting cleanups

parent c26a8dab
......@@ -111,7 +111,7 @@ def getAvailableMem():
import sys
mem = vm().available
if sys.maxsize <= 2**32:
if sys.maxsize <= 2 ** 32:
mem = min([mem, sys.maxsize])
return mem
......@@ -293,7 +293,7 @@ def realToCompound(ds_real, compound_type):
ds_compound : 2D complex numpy array
Data arranged as [sample, features]
"""
new_spec_length = ds_real.shape[1]/len(compound_type)
new_spec_length = ds_real.shape[1] / len(compound_type)
if new_spec_length % 1:
raise TypeError('Provided compound type was not compatible by number of elements')
......@@ -350,4 +350,4 @@ def transformToReal(ds_main):
elif len(ds_main.dtype) > 0:
return compound_to_scalar(ds_main)
else:
return ds_main
\ No newline at end of file
return ds_main
......@@ -119,7 +119,7 @@ def save_image(image, file):
# doesn't work. Do we need a ImageSourceList too?
# and a DocumentObjectList?
image = ndarray_to_imagedatadict(image)
ret = {}
ret = dict()
ret["ImageList"] = [{"ImageData": image}]
# I think ImageSource list creates a mapping between ImageSourceIds and Images
ret["ImageSourceList"] = [{"ClassName": "ImageSource:Simple", "Id": [0], "ImageRef": 0}]
......
......@@ -22,6 +22,9 @@ class ForcIVTranslator(Translator):
Translates FORC IV datasets from .mat files to .h5
"""
def _read_data(self):
    """Intentional no-op stub.

    NOTE(review): appears to exist only to satisfy an abstract method on the
    Translator base class — the actual file reading is presumably done inside
    translate(); confirm against the Translator ABC.
    """
    pass
def _parse_file_path(self, input_path):
    """Intentional no-op stub; input_path is ignored.

    NOTE(review): appears to exist only to satisfy an abstract method on the
    Translator base class — path handling is presumably done inside
    translate(); confirm against the Translator ABC.
    """
    pass
......
......@@ -24,7 +24,13 @@ class GDMTranslator(Translator):
"""
Translates G-mode w^2 datasets from .mat files to .h5
"""
def _read_data(self):
    """Intentional no-op stub.

    NOTE(review): looks like an abstract-method placeholder required by the
    Translator base class — the real reading happens in translate() below;
    confirm against the Translator ABC.
    """
    pass
def _parse_file_path(self, input_path):
    """Intentional no-op stub; input_path is ignored.

    NOTE(review): looks like an abstract-method placeholder required by the
    Translator base class — path parsing happens in translate() below;
    confirm against the Translator ABC.
    """
    pass
def translate(self, parm_path):
"""
Basic method that translates .mat data files to a single .h5 file
......
......@@ -24,7 +24,10 @@ class SporcTranslator(Translator):
"""
Translates G-mode SPORC datasets from .mat files to .h5
"""
def _parse_file_path(self, input_path):
    """Intentional no-op stub; input_path is ignored.

    NOTE(review): looks like an abstract-method placeholder required by the
    Translator base class — translate() below does its own path splitting;
    confirm against the Translator ABC.
    """
    pass
def translate(self, parm_path):
"""
Basic method that translates .mat data files to a single .h5 file
......@@ -39,7 +42,7 @@ class SporcTranslator(Translator):
h5_path : string / unicode
Absolute path of the translated h5 file
"""
(folder_path, file_name) = path.split(parm_path)
(file_name, base_name) = path.split(folder_path)
h5_path = path.join(folder_path,base_name+'.h5')
......
......@@ -330,7 +330,7 @@ def fft_filter_dataset(h5_main, filter_parms, write_filtered=True, write_condens
doing_noise_floor_filter = False
if 'noise_threshold' in filter_parms:
if filter_parms['noise_threshold'] > 0 and filter_parms['noise_threshold'] < 1:
if 0 < filter_parms['noise_threshold'] < 1:
ds_noise_floors = MicroDataset('Noise_Floors',
data=np.zeros(shape=num_effective_pix, dtype=np.float32))
doing_noise_floor_filter = True
......@@ -507,7 +507,7 @@ def filter_chunk_parallel(raw_data, parm_dict, num_cores):
noise_thresh = None
if 'noise_threshold' in parm_dict['filter_parms']:
noise_thresh = parm_dict['filter_parms']['noise_threshold']
if noise_thresh > 0 and noise_thresh < 1:
if 0 < noise_thresh < 1:
noise_floors = np.zeros(shape=num_sets, dtype=np.float32)
else:
noise_thresh = None
......@@ -590,7 +590,7 @@ def filter_chunk_serial(raw_data, parm_dict):
noise_thresh = None
if 'noise_threshold' in parm_dict['filter_parms']:
noise_thresh = parm_dict['filter_parms']['noise_threshold']
if noise_thresh > 0 and noise_thresh < 1:
if 0 < noise_thresh < 1:
noise_floors = np.zeros(shape=num_sets, dtype=np.float32)
else:
noise_thresh = None
......@@ -656,7 +656,7 @@ def unit_filter(single_parm):
f_data = np.fft.fftshift(np.fft.fft(t_raw))
if 'noise_threshold' in filter_parms:
if filter_parms['noise_threshold'] > 0 and filter_parms['noise_threshold'] < 1:
if 0 < filter_parms['noise_threshold'] < 1:
noise_floor = getNoiseFloor(f_data, filter_parms['noise_threshold'])[0]
f_data[np.abs(f_data) < noise_floor] = 1E-16 # DON'T use 0 here. ipython kernel dies
......
......@@ -380,7 +380,7 @@ def __findDataset(h5_file,ds_name):
'''
Uses visit() to find all datasets with the desired name
'''
print 'Finding all instances of',ds_name
print('Finding all instances of',ds_name)
ds = []
def __findName(name,obj):
if name.split('/')[-1] == ds_name and isinstance(obj,h5py.Dataset):
......@@ -389,4 +389,4 @@ def __findDataset(h5_file,ds_name):
h5_file.visititems(__findName)
return ds
\ No newline at end of file
return ds
......@@ -696,7 +696,7 @@ def plot_map_stack(map_stack, num_comps=9, stdevs=2, color_bar_mode=None, evenly
if sys.version_info.major == 3:
inspec_func = inspect.getfullargspec
else:
inspec_func = inspect.getargspec
inspec_func = inspect.signature
for key in inspec_func(plt.figure).args:
if key in kwargs:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment