Commit d0dc4da4 authored by Somnath, Suhas's avatar Somnath, Suhas
Browse files

VirtualDataset no longer given data=[]

parent 4ae85ad9
......@@ -456,10 +456,10 @@ class BELoopFitter(Fitter):
tot_cycles = cycle_start_inds.size
# Prepare containers for the datasets
ds_projected_loops = VirtualDataset('Projected_Loops', data=[], dtype=np.float32,
ds_projected_loops = VirtualDataset('Projected_Loops', data=None, dtype=np.float32,
maxshape=self.h5_main.shape, chunking=self.h5_main.chunks,
compression='gzip')
ds_loop_metrics = VirtualDataset('Loop_Metrics', data=[], dtype=loop_metrics32,
ds_loop_metrics = VirtualDataset('Loop_Metrics', data=None, dtype=loop_metrics32,
maxshape=(self.h5_main.shape[0], tot_cycles))
ds_loop_met_spec_inds, ds_loop_met_spec_vals = build_reduced_spec_dsets(self._sho_spec_inds, self._sho_spec_vals,
......
......@@ -68,7 +68,7 @@ class BESHOfitter(Fitter):
links the guess dataset to the spectroscopic datasets.
"""
# Create all the ancillary datasets, allocate space.....
ds_guess = VirtualDataset('Guess', data=[],
ds_guess = VirtualDataset('Guess', data=None,
maxshape=(self.h5_main.shape[0], self.num_udvs_steps),
chunking=(1, self.num_udvs_steps), dtype=sho32)
ds_guess.attrs = self._parms_dict
......
......@@ -77,7 +77,7 @@ class GIVBayesian(Process):
self.forward_results = None
self._bayes_parms = None
def test(self, pix_ind=None, show_plots=True, econ=False):
def test(self, pix_ind=None, show_plots=True, econ=True):
"""
Tests the inference on a single pixel (randomly chosen unless manually specified) worth of data.
......@@ -97,7 +97,9 @@ class GIVBayesian(Process):
if pix_ind is None:
pix_ind = np.random.randint(0, high=self.h5_main.shape[0])
other_params = self.parms_dict.copy()
# removing duplicates:
_ = other_params.pop('freq')
_ = other_params.pop('econ')
return bayesian_inference_on_period(self.h5_main[pix_ind], self.single_ao, self.parms_dict['freq'],
show_plots=show_plots, econ=econ, **other_params)
......@@ -139,7 +141,7 @@ class GIVBayesian(Process):
ds_spec_inds, ds_spec_vals = build_ind_val_dsets(spec_desc, is_spectral=True, verbose=self.verbose)
cap_shape = (num_pos, 1)
ds_cap = VirtualDataset('Capacitance', data=[], maxshape=cap_shape, dtype=cap_dtype, chunking=cap_shape,
ds_cap = VirtualDataset('Capacitance', data=None, maxshape=cap_shape, dtype=cap_dtype, chunking=cap_shape,
compression='gzip')
ds_cap.attrs = {'quantity': 'Capacitance', 'units': 'pF'}
cap_spec_desc = AuxillaryDescriptor([1], ['Direction'], [''])
......
......@@ -283,7 +283,7 @@ class BEodfTranslator(Translator):
BEPS_chunks = calc_chunks([num_pix, tot_bins],
np.complex64(0).itemsize,
unit_chunks=(1, bins_per_step))
ds_main_data = VirtualDataset('Raw_Data', data=[],
ds_main_data = VirtualDataset('Raw_Data', data=None,
maxshape=(num_pix, tot_bins),
dtype=np.complex64,
chunking=BEPS_chunks,
......
......@@ -228,7 +228,7 @@ class BEodfRelaxationTranslator(Translator):
chunking = np.floor(np.sqrt(pixel_chunking))
chunking = max(1, chunking)
chunking = min(num_actual_udvs_steps, num_pix, chunking)
ds_main_data = VirtualDataset('Raw_Data', data=[], maxshape=(num_pix, tot_bins), dtype=np.complex64,
ds_main_data = VirtualDataset('Raw_Data', data=None, maxshape=(num_pix, tot_bins), dtype=np.complex64,
chunking=(chunking, chunking * bins_per_step), compression='gzip')
chan_grp = VirtualGroup('Channel_')
......
......@@ -422,7 +422,7 @@ class FakeBEPSGenerator(Translator):
np.complex64(0).itemsize,
unit_chunks=[1, self.n_bins])
ds_raw_data = VirtualDataset('Raw_Data', data=[],
ds_raw_data = VirtualDataset('Raw_Data', data=None,
maxshape=[self.n_pixels, self.n_spec_bins],
dtype=np.complex64,
compression='gzip',
......@@ -471,13 +471,13 @@ class FakeBEPSGenerator(Translator):
self.n_sho_bins],
sho32.itemsize,
unit_chunks=[1, 1])
ds_sho_fit = VirtualDataset('Fit', data=[],
ds_sho_fit = VirtualDataset('Fit', data=None,
maxshape=[self.n_pixels, self.n_sho_bins],
dtype=sho32,
compression='gzip',
chunking=sho_chunking,
parent=sho_grp)
ds_sho_guess = VirtualDataset('Guess', data=[],
ds_sho_guess = VirtualDataset('Guess', data=None,
maxshape=[self.n_pixels, self.n_sho_bins],
dtype=sho32,
compression='gzip',
......@@ -519,14 +519,14 @@ class FakeBEPSGenerator(Translator):
loop_chunking = calc_chunks([self.n_pixels, self.n_loops],
loop_fit32.itemsize,
unit_chunks=[1, 1])
ds_loop_fit = VirtualDataset('Fit', data=[],
ds_loop_fit = VirtualDataset('Fit', data=None,
maxshape=[self.n_pixels, self.n_loops],
dtype=loop_fit32,
compression='gzip',
chunking=loop_chunking,
parent=loop_grp)
ds_loop_guess = VirtualDataset('Guess', data=[],
ds_loop_guess = VirtualDataset('Guess', data=None,
maxshape=[self.n_pixels, self.n_loops],
dtype=loop_fit32,
compression='gzip',
......
......@@ -106,7 +106,7 @@ class GDMTranslator(Translator):
# Minimize file size to the extent possible.
# DAQs are rated at 16 bit so float16 should be most appropriate.
# For some reason, compression is more effective on time series data
ds_main_data = VirtualDataset('Raw_Data', data=[], maxshape=(num_pix, len(freq_array) * num_bins),
ds_main_data = VirtualDataset('Raw_Data', data=None, maxshape=(num_pix, len(freq_array) * num_bins),
dtype=np.float32, chunking=(1, num_bins), compression='gzip')
chan_grp = VirtualGroup('Channel_000')
......
......@@ -66,7 +66,7 @@ class GIVTranslator(Translator):
# Minimize file size to the extent possible.
# DAQs are rated at 16 bit so float16 should be most appropriate.
# For some reason, compression is effective only on time series data
ds_raw_data = VirtualDataset('Raw_Data', data=[],
ds_raw_data = VirtualDataset('Raw_Data', data=None,
maxshape=(parm_dict['grid_num_rows'], excit_wfm.size),
dtype=np.float16, chunking=(1, excit_wfm.size), compression='gzip')
ds_raw_data.attrs['quantity'] = ['Current']
......
......@@ -133,7 +133,7 @@ class GLineTranslator(Translator):
This does NOT change with each file. The data written to it does.
The auxiliary datasets will not change with each raw data file since
only one excitation waveform is used"""
ds_main_data = VirtualDataset('Raw_Data', data=[],
ds_main_data = VirtualDataset('Raw_Data', data=None,
maxshape=(self.num_rows, self.points_per_pixel * num_cols),
chunking=(1, self.points_per_pixel), dtype=np.float16)
ds_main_data.attrs['quantity'] = ['Deflection']
......
......@@ -143,7 +143,7 @@ class GTuneTranslator(GLineTranslator):
The auxiliary datasets will not change with each raw data file since
only one excitation waveform is used
"""
ds_main_data = VirtualDataset('Raw_Data', data=[],
ds_main_data = VirtualDataset('Raw_Data', data=None,
maxshape=(self.num_rows, self.points_per_pixel * num_cols),
chunking=(1, self.points_per_pixel), dtype=np.float16)
ds_main_data.attrs['quantity'] = ['Deflection']
......
......@@ -161,7 +161,7 @@ class ImageTranslator(Translator):
unit_chunks=[1, 1])
# Allocate space for Main_Data and Pixel averaged Data
ds_main_data = VirtualDataset('Raw_Data', data=[], maxshape=(num_pixels, 1),
ds_main_data = VirtualDataset('Raw_Data', data=None, maxshape=(num_pixels, 1),
chunking=ds_chunking, dtype=data_type, compression='gzip')
# Add datasets as children of Measurement_000 data group
chan_grp.add_children([ds_main_data, ds_spec_ind, ds_spec_vals, ds_pos_ind,
......
......@@ -275,7 +275,7 @@ class NDataTranslator(Translator):
unit_chunks=(1, num_pixels))
# Allocate space for Main_Data and Pixel averaged Data
ds_main_data = VirtualDataset('Raw_Data', data=[], maxshape=(num_images, num_pixels),
ds_main_data = VirtualDataset('Raw_Data', data=None, maxshape=(num_images, num_pixels),
chunking=ds_chunking, dtype=np.float32, compression='gzip')
ds_mean_ronch_data = VirtualDataset('Mean_Ronchigram',
data=np.zeros(num_pixels, dtype=np.float32),
......
......@@ -465,7 +465,7 @@ class OneViewTranslator(Translator):
unit_chunks=(1, num_pixels))
# Allocate space for Main_Data and Pixel averaged Data
ds_main_data = VirtualDataset('Raw_Data', data=[], maxshape=(num_files, num_pixels),
ds_main_data = VirtualDataset('Raw_Data', data=None, maxshape=(num_files, num_pixels),
chunking=ds_chunking, dtype=data_type, compression='gzip')
ds_mean_ronch_data = VirtualDataset('Mean_Ronchigram',
data=np.zeros(num_pixels, dtype=np.float32),
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment