Commit b1910763 authored by Chris Smith

PEP8 cleanups

parent a39552bc
@@ -66,8 +66,12 @@ class BELoopFitter(Fitter):
"""
def __init__(self, h5_main, variables=['DC_Offset'], parallel=True):
def __init__(self, h5_main, variables=None, parallel=True):
if variables is None:
variables = ['DC_Offset']
super(BELoopFitter, self).__init__(h5_main, variables, parallel)
self._h5_group = None
self.h5_guess_parameters = None
self.h5_fit_parameters = None
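
The change above swaps a mutable default argument (variables=['DC_Offset']) for a None sentinel. Python evaluates default values once, at function definition, so a default list is silently shared across every call. A minimal sketch of the pitfall, with hypothetical names:

    def append_item(item, bucket=[]):
        # the SAME list object is reused on every call
        bucket.append(item)
        return bucket

    print(append_item(1))  # [1]
    print(append_item(2))  # [1, 2] - state leaked in from the first call

    def append_item_safe(item, bucket=None):
        if bucket is None:  # fresh list per call - the pattern adopted here
            bucket = []
        bucket.append(item)
        return bucket

The same sentinel pattern is applied to _is_legal, do_fit and BESHOfitter.__init__ in the hunks below.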
@@ -88,7 +92,7 @@ class BELoopFitter(Fitter):
self._maxDataChunk = 1
self._fit_dim_name = variables[0]
def _is_legal(self, h5_main, variables=['DC_Offset']):
def _is_legal(self, h5_main, variables=None):
"""
Checks whether or not the provided object can be analyzed by this class.
@@ -106,6 +110,9 @@ class BELoopFitter(Fitter):
Whether or not this dataset satisfies the necessary conditions for analysis
"""
if variables is None:
variables = ['DC_Offset']
file_data_type = get_attr(h5_main.file, 'data_type')
meas_grp_name = h5_main.name.split('/')
h5_meas_grp = h5_main.file[meas_grp_name[1]]
@@ -250,8 +257,8 @@ class BELoopFitter(Fitter):
# Reshape back
if len(self._sho_all_but_forc_inds) != 1:
projected_loops_2d2 = self._reshape_projected_loops_for_h5(projected_loops_2d.T,
order_dc_offset_reverse,
nd_mat_shape_dc_first)
order_dc_offset_reverse,
nd_mat_shape_dc_first)
metrics_2d = self._reshape_results_for_h5(loop_metrics_1d, nd_mat_shape_dc_first)
guessed_loops_2 = self._reshape_results_for_h5(guessed_loops, nd_mat_shape_dc_first)
@@ -274,8 +281,8 @@ class BELoopFitter(Fitter):
return PycroDataset(self.h5_guess)
def do_fit(self, processors=None, max_mem=None, solver_type='least_squares', solver_options={'jac': '2-point'},
obj_func={'class': 'BE_Fit_Methods', 'obj_func': 'BE_LOOP', 'xvals': np.array([])},
def do_fit(self, processors=None, max_mem=None, solver_type='least_squares', solver_options=None,
obj_func=None,
get_loop_parameters=True, h5_guess=None):
"""
Fit the loops
@@ -308,6 +315,10 @@ class BELoopFitter(Fitter):
List of the results returned by the solver
"""
if obj_func is None:
obj_func = {'class': 'BE_Fit_Methods', 'obj_func': 'BE_LOOP', 'xvals': np.array([])}
if solver_options is None:
solver_options = {'jac': '2-point'}
'''
Set the number of processors and the amount of RAM to use in the fit
'''
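
For context on the restored defaults: solver_type is looked up by name inside scipy.optimize (see the Optimize hunk further down), so the defaults above boil down to a scipy.optimize.least_squares call with jac='2-point'. A runnable sketch with a toy residual function and assumed data, not the actual BE loop objective:

    import numpy as np
    import scipy.optimize

    def residuals(p, x):
        # residuals of the model y = p[0] * x against synthetic data y = 2 * x
        return p[0] * x - 2.0 * x

    solver = getattr(scipy.optimize, 'least_squares')  # mirrors the name-based lookup
    result = solver(residuals, np.array([1.0]), args=[np.arange(5.0)], jac='2-point')
    print(result.x)  # ~[2.0]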
@@ -389,8 +400,6 @@ class BELoopFitter(Fitter):
self._start_pos = self._end_pos
self._get_guess_chunk()
elif legit_obj_func:
warn('Error: Solver "%s" does not exist! For additional info see scipy.optimize\n' % solver_type)
return None
@@ -592,9 +601,9 @@ class BELoopFitter(Fitter):
"""
# step 4: reshape to N dimensions
fit_nd, success = reshape_to_n_dims(raw_2d,
h5_pos=None,
h5_spec=self._sho_spec_inds[self._sho_all_but_forc_inds,
self._current_sho_spec_slice])
h5_pos=None,
h5_spec=self._sho_spec_inds[self._sho_all_but_forc_inds,
self._current_sho_spec_slice])
if not success:
warn('Error - could not reshape provided raw data chunk...')
return None
@@ -657,9 +666,9 @@ class BELoopFitter(Fitter):
print('Projected loops after moving DC offset inwards:', projected_loops_nd_2.shape)
# step 11: reshape back to 2D
proj_loops_2d, success = reshape_from_n_dims(projected_loops_nd_2,
h5_pos=None,
h5_spec=self._sho_spec_inds[self._sho_all_but_forc_inds,
self._current_sho_spec_slice])
h5_pos=None,
h5_spec=self._sho_spec_inds[self._sho_all_but_forc_inds,
self._current_sho_spec_slice])
if not success:
warn('unable to reshape projected loops')
return None
@@ -668,7 +677,7 @@ class BELoopFitter(Fitter):
return proj_loops_2d
def _reshape_results_for_h5(self, raw_results, nd_mat_shape_dc_first, verbose=False):
def _reshape_results_for_h5(self, raw_results, nd_mat_shape_dc_first):
"""
Reshapes the 1D loop metrics into a format that can be written to the h5 file
@@ -680,8 +689,6 @@ class BELoopFitter(Fitter):
nd_mat_shape_dc_first : 1D numpy unsigned int array
Shape of the N dimensional array that the raw_results can be turned into.
We use the order_dc_offset_reverse after this reshape
verbose : Boolean (Optional. Default is False)
Whether or not to print debugging statements
Returns
-------
@@ -703,8 +710,8 @@ class BELoopFitter(Fitter):
# step 11: reshape back to 2D
metrics_2d, success = reshape_from_n_dims(loop_metrics_nd,
h5_pos=None,
h5_spec=spec_inds)
h5_pos=None,
h5_spec=spec_inds)
if not success:
warn('unable to reshape ND results back to 2D')
return None
@@ -727,18 +734,12 @@ class BELoopFitter(Fitter):
dc_vec : 1D float numpy array
DC offsets for the current FORC step
"""
spec_sort = get_sort_order(self._sho_spec_inds[self._sho_all_but_forc_inds, self._current_sho_spec_slice])
# get the size for each of these dimensions
spec_dims = self.h5_main.spec_dim_sizes
# apply this knowledge to reshape the spectroscopic values
# remember to reshape such that the dimensions are arranged in reverse order (slow to fast)
spec_vals_nd, success = reshape_to_n_dims(self._sho_spec_vals[self._sho_all_but_forc_inds,
self._current_sho_spec_slice],
h5_spec=self._sho_spec_inds[self._sho_all_but_forc_inds,
self._current_sho_spec_slice])
self._current_sho_spec_slice],
h5_spec=self._sho_spec_inds[self._sho_all_but_forc_inds,
self._current_sho_spec_slice])
# This should result in an N+1 dimensional matrix where the first index contains the actual data
# the other dimensions are present to easily slice the data
spec_labels_sorted = np.hstack(('Dim', self.h5_main.spec_dim_labels))
@@ -943,7 +944,7 @@ class BELoopFitter(Fitter):
self.data = None
guess = self.h5_guess[self._start_pos:self._end_pos,
self._current_met_spec_slice].reshape([-1, 1])
self._current_met_spec_slice].reshape([-1, 1])
self.guess = flatten_compound_to_real(guess)[:, :-1]
def _create_guess_datasets(self):
@@ -22,7 +22,7 @@ sho32 = np.dtype({'names': field_names,
class BESHOfitter(Fitter):
def __init__(self, h5_main, variables=['Frequency'], **kwargs):
def __init__(self, h5_main, variables=None, **kwargs):
"""
Analysis of Band excitation spectra with harmonic oscillator responses.
@@ -34,7 +34,11 @@ class BESHOfitter(Fitter):
variables : list(string), Default ['Frequency']
List of attributes that h5_main should possess so that it may be analyzed by Model.
"""
if variables is None:
variables = ['Frequency']
super(BESHOfitter, self).__init__(h5_main, variables, **kwargs)
self.step_start_inds = None
self.is_reshapable = True
self.num_udvs_steps = None
@@ -424,4 +428,4 @@ def is_reshapable(h5_main, step_start_inds=None):
step_start_inds = np.hstack((step_start_inds, h5_main.shape[1]))
num_bins = np.diff(step_start_inds)
step_types = np.unique(num_bins)
return len(step_types) == 1
\ No newline at end of file
return len(step_types) == 1
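
is_reshapable, whose ending is shown above, reduces to checking that every UDVS step spans the same number of bins; for example:

    import numpy as np

    step_start_inds = np.array([0, 5, 10, 15])  # assumed step boundaries
    step_start_inds = np.hstack((step_start_inds, 20))  # append the total bin count
    num_bins = np.diff(step_start_inds)  # [5 5 5 5]
    print(len(np.unique(num_bins)) == 1)  # True -> the dataset is reshapable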
@@ -101,8 +101,8 @@ class Optimize(object):
self.options = options
gm = GuessMethods()
if strategy in gm.methods:
func = gm.__getattribute__(strategy)#(**options)
# start pool of workers
func = gm.__getattribute__(strategy) # (**options)
# start pool of workers
if processors > 1:
print('Computing Jobs In parallel ... launching %i kernels...' % processors)
else:
@@ -164,7 +164,7 @@ class Optimize(object):
solver = scipy.optimize.__dict__[self.solver_type]
values = [joblib.delayed(solver)(self.obj_func, guess,
args=[vector]+list(self.obj_func_args),
args=[vector] + list(self.obj_func_args),
**solver_options) for vector, guess in zip(self.data, self.guess)]
results = joblib.Parallel(n_jobs=processors)(values)
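
The delayed/Parallel idiom above packages each fit as a (function, args, kwargs) task and dispatches the batch to worker processes; a minimal self-contained version of the same pattern:

    import joblib

    def square(x):
        return x * x  # stand-in for one per-pixel solver call

    tasks = [joblib.delayed(square)(x) for x in range(4)]
    print(joblib.Parallel(n_jobs=2)(tasks))  # [0, 1, 4, 9]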
@@ -709,9 +709,9 @@ def fit_loop(vdc_shifted, pr_shifted, guess):
err = y - loop_fit_function(x, p)
return err
def loop_jacobian_residuals(p, y, x):
Jerr = -loop_fit_jacobian(x, p)
return Jerr
# def loop_jacobian_residuals(p, y, x):
# Jerr = -loop_fit_jacobian(x, p)
# return Jerr
# do not change these:
lb = ([-1E3, -1E3, -1E3, -1E3, -1E-1, 1E-3, 1E-3, 1E-3, 1E-3]) # Lower Bounds
@@ -24,7 +24,8 @@ from ...__version__ import version as pycroscopy_version
__all__ = ['get_attr', 'get_h5_obj_refs', 'get_indices_for_region_ref', 'get_dimensionality', 'get_sort_order',
'get_auxiliary_datasets', 'get_attributes', 'get_group_refs', 'check_if_main', 'check_and_link_ancillary',
'copy_region_refs', 'get_all_main', 'get_unit_values', 'get_data_descriptor', 'check_for_matching_attrs',
'create_region_reference', 'copy_attributes', 'reshape_to_n_dims', 'link_h5_objects_as_attrs',
'create_region_reference',
'copy_attributes', 'reshape_to_n_dims', 'link_h5_objects_as_attrs',
'link_h5_obj_as_alias',
'find_results_groups', 'get_formatted_labels', 'reshape_from_n_dims', 'find_dataset', 'print_tree',
'copy_main_attributes', 'create_empty_dataset', 'check_for_old', 'get_source_dataset',
@@ -37,6 +38,7 @@ __all__ = ['get_attr', 'get_h5_obj_refs', 'get_indices_for_region_ref', 'get_dim
if sys.version_info.major == 3:
unicode = str
# TODO: Next version should account for two objects being in different files!
@@ -163,7 +165,7 @@ def get_auxiliary_datasets(h5_object, aux_dset_name=None):
for curr_name in aux_dset_name:
h5_ref = h5_object.attrs[curr_name]
if isinstance(h5_ref, h5py.Reference) and isinstance(h5_file[h5_ref], h5py.Dataset) and not \
isinstance(h5_ref, h5py.RegionReference):
isinstance(h5_ref, h5py.RegionReference):
data_list.append(h5_file[h5_ref])
except KeyError:
raise KeyError('%s is not an attribute of %s' % (str(curr_name), h5_object.name))
@@ -201,10 +203,10 @@ def get_attr(h5_object, attr_name):
att_val = att_val.decode('utf-8')
elif type(att_val) == np.ndarray:
if sys.version_info.major == 3:
if sys.version_info.major == 3:
if att_val.dtype.type in [np.bytes_, np.object_]:
att_val = np.array([str(x, 'utf-8') for x in att_val])
return att_val
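
The duplicated `if sys.version_info.major == 3:` lines above are the old and new sides of a whitespace-only change; the guarded branch decodes HDF5 string attributes that h5py returns as bytes arrays under Python 3. That decode step in isolation:

    import numpy as np

    att_val = np.array([b'Frequency', b'DC_Offset'])  # as h5py may return on Python 3
    if att_val.dtype.type in [np.bytes_, np.object_]:
        att_val = np.array([str(x, 'utf-8') for x in att_val])
    print(att_val)  # ['Frequency' 'DC_Offset']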
@@ -648,7 +650,7 @@ def create_region_reference(h5_main, ref_inds):
"""
if not isinstance(h5_main, h5py.Dataset):
raise TypeError('h5_main should be a h5py.Dataset object')
if not isinstance(ref_inds, (Iterable)):
if not isinstance(ref_inds, Iterable):
raise TypeError('ref_inds should be a list or tuple')
h5_space = h5_main.id.get_space()
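
On the isinstance cleanup above: parentheses around a single name do not make a tuple, so (Iterable) was already identical to Iterable and the extra parentheses were pure noise. For comparison:

    from collections.abc import Iterable  # older code imported this from collections

    print((Iterable) is Iterable)  # True - the parentheses are a no-op
    print(isinstance([1, 2], (Iterable,)))  # True - a real 1-tuple needs the comma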
@@ -1244,7 +1246,8 @@ def create_empty_dataset(source_dset, dtype, dset_name, h5_group=None, new_attrs
h5_new_dset = h5_group.create_dataset(dset_name, shape=source_dset.shape, dtype=dtype,
compression=source_dset.compression, chunks=source_dset.chunks)
else:
raise KeyError('{} is already a {} in group: {}'.format(dset_name, type(h5_group[dset_name]), h5_group.name))
raise KeyError('{} is already a {} in group: {}'.format(dset_name, type(h5_group[dset_name]),
h5_group.name))
else:
h5_new_dset = h5_group.create_dataset(dset_name, shape=source_dset.shape, dtype=dtype,
@@ -1294,25 +1297,25 @@ def copy_attributes(source, dest, skip_refs=True):
warn('Skipping region reference named: {}'.format(att_name))
continue
elif isinstance(att_val, h5py.RegionReference):
# """
# Dereference old reference, get the appropriate data
# slice and create new reference.
# """
# try:
# region = h5py.h5r.get_region(att_val, source.id)
#
# start, end = region.get_select_bounds()
# ref_slice = []
# for i in range(len(start)):
# if start[i] == end[i]:
# ref_slice.append(start[i])
# else:
# ref_slice.append(slice(start[i], end[i]))
# except:
# warn('Could not copy region reference:{} to {}'.format(att_name, dest.name))
# continue
#
# dest.attrs[att_name] = dest.regionref[tuple(ref_slice)]
# """
# Dereference old reference, get the appropriate data
# slice and create new reference.
# """
# try:
# region = h5py.h5r.get_region(att_val, source.id)
#
# start, end = region.get_select_bounds()
# ref_slice = []
# for i in range(len(start)):
# if start[i] == end[i]:
# ref_slice.append(start[i])
# else:
# ref_slice.append(slice(start[i], end[i]))
# except:
# warn('Could not copy region reference:{} to {}'.format(att_name, dest.name))
# continue
#
# dest.attrs[att_name] = dest.regionref[tuple(ref_slice)]
continue
else:
dest.attrs[att_name] = att_val
@@ -2116,7 +2119,7 @@ def write_ind_val_dsets(h5_parent_group, dimensions, is_spectral=True, verbose=F
for sub_name in ['Indices', 'Values']:
if base_name + sub_name in h5_parent_group.keys():
raise KeyError('Dataset: {} already exists in provided group: {}'.format(base_name + sub_name,
h5_parent_group.name))
h5_parent_group.name))
unit_values = [x.values for x in dimensions]
@@ -2492,7 +2495,7 @@ def write_main_dataset(h5_parent_group, main_data, main_data_name, quantity, uni
aux_prefix += '_'
if '-' in aux_prefix:
warn('aux_' + dim_type + ' should not contain the "-" character. Reformatted name from:{} to '
'{}'.format(aux_prefix, aux_prefix.replace('-', '_')))
'{}'.format(aux_prefix, aux_prefix.replace('-', '_')))
aux_prefix = aux_prefix.replace('-', '_')
for dset_name in [aux_prefix + 'Indices', aux_prefix + 'Values']:
if dset_name in h5_parent_group.keys():
@@ -2648,7 +2651,7 @@ def attempt_reg_ref_build(h5_dset, dim_names, verbose=False):
if verbose:
print('Most likely a spectroscopic indices / values dataset')
for dim_index, curr_name in enumerate(dim_names):
labels_dict[curr_name] = (slice(dim_index, dim_index+1), slice(None))
labels_dict[curr_name] = (slice(dim_index, dim_index + 1), slice(None))
elif len(dim_names) == h5_dset.shape[1]:
if verbose:
print('Most likely a position indices / values dataset')
@@ -2786,4 +2789,4 @@ def clean_reg_ref(h5_dset, reg_ref_tuple, verbose=False):
if verbose:
print('Region reference tuple now: {}'.format(new_reg_refs))
return tuple(new_reg_refs)
\ No newline at end of file
return tuple(new_reg_refs)
@@ -155,10 +155,10 @@ class ImageTranslator(Translator):
pos_dims = [Dimension('X', 'a.u.', np.arange(usize)), Dimension('Y', 'a.u.', np.arange(vsize))]
chunking = calc_chunks([num_pixels, 1],
data_type(0).itemsize,
unit_chunks=[1, 1])
data_type(0).itemsize,
unit_chunks=[1, 1])
h5_main = write_main_dataset(chan_grp, (usize*vsize, 1), 'Raw_Data', 'Intensity', 'a.u.',
h5_main = write_main_dataset(chan_grp, (usize * vsize, 1), 'Raw_Data', 'Intensity', 'a.u.',
pos_dims, Dimension('None', 'a.u.', [1]), dtype=data_type, chunks=chunking)
self.h5_file.flush()
@@ -43,12 +43,12 @@ class NumpyTranslator(Translator):
units : String / Unicode
Name of units for the quantity stored in the dataset. Example - 'A' for amperes
pos_dims : Dimension or array-like of Dimension objects
Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values
datasets
Sequence of Dimension objects that provides all necessary instructions for constructing the
indices and values datasets
Object specifying the instructions necessary for building the Position indices and values datasets
spec_dims : Dimension or array-like of Dimension objects
Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values
datasets
Sequence of Dimension objects that provides all necessary instructions for constructing the
indices and values datasets
Object specifying the instructions necessary for building the Spectroscopic indices and values datasets
translator_name : String / unicode, Optional
Name of the translator. Example - 'HitachiSEMTranslator'
@@ -558,9 +558,11 @@ class PycroDataset(h5py.Dataset):
if verbose:
print('Position VizDimensions:')
for item in pos_dims: print('{}\n{}'.format(len(item.values), item))
for item in pos_dims:
print('{}\n{}'.format(len(item.values), item))
print('Spectroscopic VizDimensions:')
for item in spec_dims: print('{}\n{}'.format(len(item.values), item))
for item in spec_dims:
print('{}\n{}'.format(len(item.values), item))
print('N dimensional data sent to visualizer of shape: {}'.format(data_slice.shape))
simple_ndim_visualizer(data_slice, pos_dims, spec_dims, verbose=verbose, **kwargs)
@@ -127,19 +127,19 @@ def make_indices_matrix(num_steps, is_position=True):
for indx, curr_steps in enumerate(num_steps):
if curr_steps > 1:
part1 = np.prod(num_steps[:indx+1])
part1 = np.prod(num_steps[:indx + 1])
if indx > 0:
part2 = np.prod(num_steps[:indx])
else:
part2 = 1
if indx+1 == len(num_steps):
if indx + 1 == len(num_steps):
part3 = 1
else:
part3 = np.prod(num_steps[indx+1:])
part3 = np.prod(num_steps[indx + 1:])
indices_matrix[:, dim_ind] = np.tile(np.floor(np.arange(part1)/part2), part3)
indices_matrix[:, dim_ind] = np.tile(np.floor(np.arange(part1) / part2), part3)
dim_ind += 1
if not is_position:
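
For reference, make_indices_matrix builds one column per dimension, with earlier dimensions cycling fastest; part1/part2/part3 are the products that drive the floor/tile construction above. A worked example for num_steps = [2, 3]:

    import numpy as np

    num_steps = [2, 3]
    cols = []
    for indx in range(len(num_steps)):
        part1 = np.prod(num_steps[:indx + 1])
        part2 = np.prod(num_steps[:indx]) if indx > 0 else 1
        part3 = np.prod(num_steps[indx + 1:]) if indx + 1 < len(num_steps) else 1
        cols.append(np.tile(np.floor(np.arange(part1) / part2), part3))
    print(np.vstack(cols).T)
    # [[0. 0.]
    #  [1. 0.]
    #  [0. 1.]
    #  [1. 1.]
    #  [0. 2.]
    #  [1. 2.]]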
@@ -358,4 +358,4 @@ def calc_chunks(dimensions, dtype_byte_size, unit_chunks=None, max_chunk_mem=102
chunking = tuple(unit_chunks)
return chunking
\ No newline at end of file
return chunking
from .process import Process, parallel_compute
__all__ = ['parallel_compute', 'Process']
\ No newline at end of file
__all__ = ['parallel_compute', 'Process']
from . import plot_utils
from . import jupyter_utils
__all__ = ['plot_utils', 'jupyter_utils']
\ No newline at end of file
__all__ = ['plot_utils', 'jupyter_utils']
@@ -338,6 +338,7 @@ def simple_ndim_visualizer(data_mat, pos_dims, spec_dims, spec_xdim=None, pos_xd
return fig
def save_fig_filebox_button(fig, filename):
"""
Create ipython widgets to allow the user to save a figure to the
@@ -380,4 +381,4 @@ def save_fig_filebox_button(fig, filename):
save_button.on_click(_save_fig)
return widget_box
\ No newline at end of file
return widget_box
@@ -592,14 +592,15 @@ def plot_map(axis, img, show_xy_ticks=True, show_cbar=True, x_vec=None, y_vec=No
x_ticks = np.linspace(0, img.shape[1] - 1, num_ticks, dtype=int)
if x_vec is not None:
if isinstance(x_vec, (int,float)):
if isinstance(x_vec, (int, float)):
if x_vec > 0.01:
x_tick_labs = [str(np.round(ind* x_vec/img.shape[1],2)) for ind in x_ticks]
x_tick_labs = [str(np.round(ind * x_vec / img.shape[1], 2)) for ind in x_ticks]
else:
x_tick_labs = ['{0:.2e}'.format(ind* x_vec/img.shape[1]) for ind in x_ticks]
x_tick_labs = ['{0:.2e}'.format(ind * x_vec / img.shape[1]) for ind in x_ticks]
else:
if not isinstance(x_vec, (np.ndarray, list, tuple, range)) or len(x_vec) != img.shape[1]:
raise ValueError('x_vec should be array-like with shape equal to the second axis of img or img_size')
raise ValueError(
'x_vec should be array-like with shape equal to the second axis of img or img_size')
x_tick_labs = [str(np.round(x_vec[ind], 2)) for ind in x_ticks]
else:
x_tick_labs = [str(ind) for ind in x_ticks]
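
The arithmetic above turns pixel tick positions into physical-unit labels when x_vec is given as a single full-width extent; for an image 100 pixels wide spanning 5.0 units (assumed values):

    import numpy as np

    img_width, x_vec, num_ticks = 100, 5.0, 5
    x_ticks = np.linspace(0, img_width - 1, num_ticks, dtype=int)  # [ 0 24 49 74 99]
    x_tick_labs = [str(np.round(ind * x_vec / img_width, 2)) for ind in x_ticks]
    print(x_tick_labs)  # ['0.0', '1.2', '2.45', '3.7', '4.95']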
@@ -614,11 +615,11 @@ def plot_map(axis, img, show_xy_ticks=True, show_cbar=True, x_vec=None, y_vec=No
if show_xy_ticks is True or y_vec is not None:
y_ticks = np.linspace(0, img.shape[0] - 1, num_ticks, dtype=int)
if y_vec is not None:
if isinstance(y_vec, (int,float)):
if isinstance(y_vec, (int, float)):
if y_vec > 0.01:
y_tick_labs = [str(np.round(ind* y_vec/img.shape[1],2)) for ind in y_ticks]
y_tick_labs = [str(np.round(ind * y_vec / img.shape[1], 2)) for ind in y_ticks]
else:
y_tick_labs = ['{0:.2e}'.format(ind* y_vec/img.shape[1]) for ind in y_ticks]
y_tick_labs = ['{0:.2e}'.format(ind * y_vec / img.shape[1]) for ind in y_ticks]
else:
if not isinstance(y_vec, (np.ndarray, list, tuple, range)) or len(y_vec) != img.shape[0]:
raise ValueError('y_vec should be array-like with shape equal to the first axis of img')
@@ -1161,9 +1162,9 @@ def plot_map_stack(map_stack, num_comps=9, stdevs=2, color_bar_mode=None, evenly
igkwargs.update({key: kwargs.pop(key)})
axes = ImageGrid(fig, 111, nrows_ncols=(p_rows, p_cols),
cbar_mode=color_bar_mode,
axes_pad=(pad_w * fig_w, pad_h * fig_h),
**igkwargs)
cbar_mode=color_bar_mode,
axes_pad=(pad_w * fig_w, pad_h * fig_h),
**igkwargs)
fig.canvas.set_window_title(title)
# These parameters have not been easy to fix:
@@ -315,7 +315,6 @@ class HDFwriter(object):
return h5_new_group
@staticmethod
def _create_simple_dset(h5_group, microdset):
"""
@@ -198,7 +198,8 @@ class BEodfTranslator(Translator):
UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
dtype=np.float32).reshape(1, len(UDVS_labs))
old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE), np.zeros(tot_bins, dtype=INDICES_DTYPE)))
old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
np.zeros(tot_bins, dtype=INDICES_DTYPE)))
# Some very basic information that can help the processing / analysis crew
parm_dict['num_bins'] = tot_bins
@@ -278,23 +279,23 @@ class BEodfTranslator(Translator):
write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1'})
# Now the datasets!
h5_ex_wfm = h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)
h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)
h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=verbose)
write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=verbose)
# ds_udvs_labs = MicroDataset('UDVS_Labels',np.array(UDVS_labs))
h5_UDVS_inds = h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])
h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])
# ds_spec_labs = MicroDataset('Spectroscopic_Labels',np.array(['Bin','UDVS_Step']))
h5_bin_steps = h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
dtype=INDICES_DTYPE)
h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
dtype=INDICES_DTYPE)
h5_bin_inds = h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
h5_bin_freq = h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
h5_bin_FFT = h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
h5_wfm_typ = h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)
h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)
pos_dims = [Dimension('X', 'm', np.arange(num_cols)), Dimension('Y', 'm', np.arange(num_rows))]
h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=verbose)
@@ -307,8 +308,8 @@ class BEodfTranslator(Translator):
write_simple_attrs(dset, spec_dim_dict)
# Noise floor should be of shape: (udvs_steps x 3 x positions)
h5_noise_floor = h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
chunks=(1, num_actual_udvs_steps))
h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
chunks=(1, num_actual_udvs_steps))
"""
New Method for chunking the Main_Data dataset. Chunking is now done in N-by-N squares
@@ -85,7 +85,6 @@ class BEodfRelaxationTranslator(Translator):
if real_size != imag_size:
raise ValueError("Real and imaginary file sizes DON'T match!. Ending")
add_pix = False
num_rows = int(parm_dict['grid_num_rows'])
num_cols = int(parm_dict['grid_num_cols'])
num_pix = num_rows * num_cols
@@ -103,7 +102,6 @@
elif not check_bins % 1:
tot_bins = check_bins
warn('Warning: A pixel seems to be missing from the data. File will be padded with zeros.')
add_pix = True
tot_bins = int(tot_bins)
(bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec) = self.__readOldMatBEvecs(path_dict['old_mat_parms'])
@@ -378,7 +378,7 @@ class FakeBEPSGenerator(Translator):
spec_dims = list()
for dim_size, dim_name, dim_units, step_size, init_val in zip(spec_dims, spec_labs, spec_units, spec_steps,
spec_start):
spec_dims.append(Dimension(dim_name, dim_units, np.arange(dim_size)*step_size + init_val))
spec_dims.append(Dimension(dim_name, dim_units, np.arange(dim_size) * step_size + init_val))
spec_inds, spec_vals = build_ind_val_dsets(spec_dims, is_spectral=True)