# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:12 2015

@author: Suhas Somnath, Stephen Jesse
"""

from __future__ import division, print_function, absolute_import, unicode_literals

from os import path, listdir, remove
import sys
import datetime
from warnings import warn
import h5py
import numpy as np
from scipy.io.matlab import loadmat  # To load parameters stored in Matlab .mat file

from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, \
    createSpecVals, requires_conjugate, nf32
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import INDICES_DTYPE, VALUES_DTYPE, Dimension, calc_chunks
from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, write_region_references, \
    create_indexed_group, write_simple_attrs, write_book_keeping_attrs, copy_attributes,\
    write_reduced_anc_dsets
from pyUSID.io.usi_data import USIDataset
from pyUSID.processing.comp_utils import get_available_memory

if sys.version_info.major == 3:
    unicode = str


class BEodfTranslator(Translator):
    """
    Translates either the Band Excitation (BE) scan or Band Excitation 
    Polarization Switching (BEPS) data format from the old data format(s) to .h5
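
    Typical usage (a sketch; the import path and file locations below are examples,
    not taken from this file):

        from pycroscopy.io.translators import BEodfTranslator

        translator = BEodfTranslator(cores=2, num_rand_spectra=500)
        h5_path = translator.translate('/path/to/raw_data/parms.txt',
                                       show_plots=False, save_plots=False)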
    """

    def __init__(self, *args, **kwargs):
        super(BEodfTranslator, self).__init__(*args, **kwargs)
        self.h5_raw = None
        self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
        self._cores = kwargs.pop('cores', None)
        self.FFT_BE_wave = None
        self.signal_type = None
        self.expt_type = None

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to raw data file

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
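
        Notes
        -----
        Example with a hypothetical folder layout:
            BEodfTranslator.is_valid_file('/data/BEPS_0001/BEPS_0001_parm.txt')
        returns the path of the parameters file that translate() accepts if this
        translator can handle the folder contents, and None otherwise.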
        """
        if not isinstance(data_path, (str, unicode)):
            raise TypeError('data_path must be a string')

        ndf = 'newdataformat'

        data_path = path.abspath(data_path)

        if path.isfile(data_path):
            ext = data_path.split('.')[-1]
            if ext.lower() not in ['jpg', 'png', 'jpeg', 'tiff', 'mat', 'txt',
                                   'dat', 'xls', 'xlsx']:
                return None
            # we only care about the folder names at this point...
            data_path, _ = path.split(data_path)

        # Check if the data is in the new or old format:
        # Check one level up:
        _, dir_name = path.split(data_path)
        if dir_name == ndf:
            # This translator could also read these files, but the NDF translator is more robust...
            return None
        # Check one level down:
        if ndf in listdir(data_path):
            # This translator could also read these files, but the NDF translator is more robust...
            return None

        file_path = path.join(data_path, listdir(path=data_path)[0])

        _, path_dict = BEodfTranslator._parse_file_path(file_path)

        if any([x.find('bigtime_0') > 0 and x.endswith('.dat') for x in
                path_dict.values()]):
            # This is a G-mode Line experiment:
            return None

        parm_found = any([piece in path_dict.keys() for piece in
                          ['parm_txt', 'old_mat_parms']])
        real_found = any([piece in path_dict.keys() for piece in
                          ['read_real', 'write_real']])
        imag_found = any([piece in path_dict.keys() for piece in
                          ['read_imag', 'write_imag']])

        if parm_found and real_found and imag_found:
            if 'parm_txt' in path_dict.keys():
                return path_dict['parm_txt']
            else:
                return path_dict['old_mat_parms']
        else:
            return None

    def translate(self, file_path, show_plots=True, save_plots=True, do_histogram=False, verbose=False):
        """
        Translates .dat data file(s) to a single .h5 file
        
        Parameters
        -------------
        file_path : String / Unicode
            Absolute file path for one of the data files. 
            It is assumed that this file is of the OLD data format.
        show_plots : (optional) Boolean
            Whether or not to show intermediate plots
        save_plots : (optional) Boolean
            Whether or not to save plots to disk
        do_histogram : (optional) Boolean
            Whether or not to construct histograms to visualize data quality. Note - this takes a fair amount of time
        verbose : (optional) Boolean
            Whether or not to print statements
            
        Returns
        ----------
        h5_path : String / Unicode
            Absolute path of the resultant .h5 file
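
        Notes
        -----
        Sketch of the resulting file layout, inferred from the code below (group
        indices may differ if indexed groups already exist in the file):

            Measurement_000/
                Channel_000/
                    Excitation_Waveform, UDVS, UDVS_Indices, Bin_Step,
                    Bin_Indices, Bin_Frequencies, Bin_FFT, Bin_Wfm_Type,
                    Position_Indices, Position_Values,
                    Spectroscopic_Indices, Spectroscopic_Values,
                    Noise_Floor, Raw_Data (num_pix x tot_bins, complex64)
                Channel_001/
                    current channel, written only if AI2 .dat files are found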
        """
        file_path = path.abspath(file_path)
        (folder_path, basename) = path.split(file_path)
        (basename, path_dict) = self._parse_file_path(file_path)

        h5_path = path.join(folder_path, basename + '.h5')
        tot_bins_multiplier = 1
        udvs_denom = 2

        if 'parm_txt' in path_dict.keys():
            if verbose:
                print('\treading parameters from text file')
            (isBEPS, parm_dict) = parmsToDict(path_dict['parm_txt'])
        elif 'old_mat_parms' in path_dict.keys():
            if verbose:
                print('\treading parameters from old mat file')
            parm_dict = self.__get_parms_from_old_mat(path_dict['old_mat_parms'])
            isBEPS = parm_dict['VS_steps_per_full_cycle'] != 0
        else:
            raise IOError('No parameters file found! Cannot translate this dataset!')
        if verbose:
            print('\tisBEPS = {}'.format(isBEPS))

        ignored_plt_grps = []
        if isBEPS:
            parm_dict['data_type'] = 'BEPSData'

            field_mode = parm_dict['VS_measure_in_field_loops']
            std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'

            if not std_expt:
                raise ValueError('This translator does not handle user defined voltage spectroscopy')

            spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])

            if parm_dict['VS_mode'] in ['DC modulation mode', 'current mode']:
                if field_mode == 'in and out-of-field':
                    tot_bins_multiplier = 2
                    udvs_denom = 1
                else:
                    if field_mode == 'out-of-field':
                        ignored_plt_grps = ['in-field']
                    else:
                        ignored_plt_grps = ['out-of-field']
            else:
                tot_bins_multiplier = 1
                udvs_denom = 1

        else:
            spec_label = 'None'
            parm_dict['data_type'] = 'BELineData'

        # Check file sizes:
        if verbose:
            print('\tChecking sizes of real and imaginary data files')

        if 'read_real' in path_dict.keys():
            real_size = path.getsize(path_dict['read_real'])
            imag_size = path.getsize(path_dict['read_imag'])
        else:
            real_size = path.getsize(path_dict['write_real'])
            imag_size = path.getsize(path_dict['write_imag'])

        if real_size != imag_size:
            raise ValueError("Real and imaginary file sizes don't match! Aborting.")

        # Check here if a second channel for current is present
        # Look for the file containing the current data

        if verbose:
            print('\tLooking for secondary channels')
        file_names = listdir(folder_path)
        aux_files = []
        current_data_exists = False
        for fname in file_names:
            if 'AI2' in fname:
                if 'write' in fname:
                    current_file = path.join(folder_path, fname)
                    current_data_exists=True
                aux_files.append(path.join(folder_path, fname))

        add_pix = False
        num_rows = int(parm_dict['grid_num_rows'])
        num_cols = int(parm_dict['grid_num_cols'])
        num_pix = num_rows * num_cols
        tot_bins = real_size / (num_pix * 4)
        # Check for case where only a single pixel is missing.
        check_bins = real_size / ((num_pix - 1) * 4)

        if verbose:
            print('\tChecking bins: Total: {}, actual: {}'.format(tot_bins,
                                                                  check_bins))

        if tot_bins % 1 and check_bins % 1:
            raise ValueError('Aborting! The file size does not correspond to an integer '
                             'number of bins per pixel. Some parameter appears to have '
                             'changed in-between')
        elif not tot_bins % 1:
            # Everything's ok
            pass
        elif not check_bins % 1:
            tot_bins = check_bins
            warn('Warning:  A pixel seems to be missing from the data. '
                 'File will be padded with zeros.')
            add_pix = True

        tot_bins = int(tot_bins) * tot_bins_multiplier

        if 'parm_mat' in path_dict.keys():
            if verbose:
                print('\treading BE arrays from the parameters .mat file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm = self.__read_parms_mat(path_dict['parm_mat'], isBEPS)
        elif 'old_mat_parms' in path_dict.keys():
            if verbose:
                print('\treading BE arrays from the old parameters .mat file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec = self.__read_old_mat_be_vecs(path_dict['old_mat_parms'])
        else:
            if verbose:
                print('\tGenerating dummy BE arrays')
            band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
            st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
            en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
            bin_freqs = np.linspace(st_f, en_f, tot_bins, dtype=np.float32)

            warn('No parms .mat file found.... Filling dummy values into ancillary datasets.')
            bin_inds = np.zeros(shape=tot_bins, dtype=np.int32)
            bin_FFT = np.zeros(shape=tot_bins, dtype=np.complex64)
            ex_wfm = np.zeros(shape=100, dtype=np.float32)

        # Forcing standardized datatypes:
        bin_inds = np.int32(bin_inds)
        bin_freqs = np.float32(bin_freqs)
        bin_FFT = np.complex64(bin_FFT)
        ex_wfm = np.float32(ex_wfm)

        self.FFT_BE_wave = bin_FFT

        if isBEPS:
            if verbose:
                print('\tBuilding UDVS table for BEPS')
            UDVS_labs, UDVS_units, UDVS_mat = self.__build_udvs_table(parm_dict)

            if verbose:
                print('\tTrimming UDVS table to remove unused plot group columns')
            UDVS_mat, UDVS_labs, UDVS_units = trimUDVS(UDVS_mat, UDVS_labs, UDVS_units, ignored_plt_grps)

            old_spec_inds = np.zeros(shape=(2, tot_bins), dtype=INDICES_DTYPE)

            # Will assume that all excitation waveforms have same num of bins
            num_actual_udvs_steps = UDVS_mat.shape[0] / udvs_denom
            bins_per_step = tot_bins / num_actual_udvs_steps
            if verbose:
                print('\t# UDVS steps: {}, # bins/step: {}'
                      ''.format(num_actual_udvs_steps, bins_per_step))

            if bins_per_step % 1:
                print('UDVS mat shape: {}, total bins: {}, bins per step: {}'.format(UDVS_mat.shape, tot_bins,
                                                                                     bins_per_step))
                raise ValueError('Non integer number of bins per step!')

            bins_per_step = int(bins_per_step)
            num_actual_udvs_steps = int(num_actual_udvs_steps)

            stind = 0
            for step_index in range(UDVS_mat.shape[0]):
                if UDVS_mat[step_index, 2] < 1E-3:  # invalid AC amplitude
                    continue
                # Bin step
                old_spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=INDICES_DTYPE)
                # UDVS step
                old_spec_inds[1, stind:stind + bins_per_step] = step_index * np.ones(bins_per_step, dtype=INDICES_DTYPE)
                stind += bins_per_step
            del stind, step_index

        else:  # BE Line
            if verbose:
                print('\tPreparing supporting variables since BE-Line')
            self.signal_type = 1
            self.expt_type = 1  # Stephen has not used this index for some reason
            num_actual_udvs_steps = 1
            bins_per_step = tot_bins
            UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
            UDVS_units = ['', 'V', 'A', '', '', '']
            UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
                                dtype=np.float32).reshape(1, len(UDVS_labs))

            old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
                                       np.zeros(tot_bins, dtype=INDICES_DTYPE)))

        # Some very basic information that can help the processing / analysis crew
        parm_dict['num_bins'] = tot_bins
        parm_dict['num_pix'] = num_pix
        parm_dict['num_udvs_steps'] = num_actual_udvs_steps
        parm_dict['num_steps'] = num_actual_udvs_steps

        if verbose:
            print('\tPreparing UDVS slices for region references')
        udvs_slices = dict()
        for col_ind, col_name in enumerate(UDVS_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))

        # Need to add the Bin Waveform type - infer from UDVS        
        exec_bin_vec = self.signal_type * np.ones(len(bin_inds), dtype=np.int32)

        if self.expt_type == 2:
            if verbose:
                print('\tExperiment type = 2. Doubling BE vectors')
            exec_bin_vec = np.hstack((exec_bin_vec, -1 * exec_bin_vec))
            bin_inds = np.hstack((bin_inds, bin_inds))
            bin_freqs = np.hstack((bin_freqs, bin_freqs))
            # This is wrong but I don't know what else to do
            bin_FFT = np.hstack((bin_FFT, bin_FFT))

        # Create Spectroscopic Values and Spectroscopic Values Labels datasets
        # This is a legacy way of doing things. Ideally, all we would need to do is just get the unit values
        if verbose:
            print('\tCalculating spectroscopic values')
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = createSpecVals(UDVS_mat,
                                                                                                     old_spec_inds,
                                                                                                     bin_freqs,
                                                                                                     exec_bin_vec,
                                                                                                     parm_dict,
                                                                                                     UDVS_labs,
                                                                                                     UDVS_units)
        # Package the parameters describing each spectroscopic dimension so they can be written as attributes below
        spec_dim_dict = dict()
        for entry in spec_vals_labs_names:
            spec_dim_dict[entry[0] + '_parameters'] = entry[1]

        spec_vals_slices = dict()

        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind, row_ind + 1), slice(None))

        if path.exists(h5_path):
            if verbose:
                print('\tRemoving existing / old translated file: ' + h5_path)
            remove(h5_path)

        # First create the file
        h5_f = h5py.File(h5_path, mode='w')

        # Then write root level attributes
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        try:
            global_parms['experiment_date'] = parm_dict['File_date_and_time']
        except KeyError:
            global_parms['experiment_date'] = '1:1:1'

        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'ODF'
        if verbose:
            print('\tWriting attributes to HDF5 file root')
        write_simple_attrs(h5_f, global_parms)
        write_book_keeping_attrs(h5_f)

        # Then create the measurement group
        h5_meas_group = create_indexed_group(h5_f, 'Measurement')

        # Write attributes at the measurement group level
        if verbose:
            print('\twriting attributes to Measurement group')
        write_simple_attrs(h5_meas_group, parm_dict)

        # Create the Channel group
        h5_chan_grp = create_indexed_group(h5_meas_group, 'Channel')

        # Write channel group attributes
        write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1',
                                         'channel_type': 'BE'})

        # Now the datasets!
        if verbose:
            print('\tCreating ancillary datasets')
        h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)

        h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
        write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=verbose)
        write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=verbose)

        h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])

        h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
                                   dtype=INDICES_DTYPE)

        h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
        h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
        h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)

        if verbose:
            print('\tWriting Position datasets')

        pos_dims = [Dimension('X', 'm', np.arange(num_cols)),
                    Dimension('Y', 'm', np.arange(num_rows))]
        h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=verbose)

        if verbose:
            print('\tWriting Spectroscopic datasets')
        h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=INDICES_DTYPE)        
        h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=np.array(spec_vals), dtype=VALUES_DTYPE)
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_region_references(dset, spec_vals_slices, add_labels_attr=True, verbose=verbose)
            write_simple_attrs(dset, {'units': spec_vals_units}, verbose=verbose)
            write_simple_attrs(dset, spec_dim_dict)

        # Noise floor should be of shape: (udvs_steps x 3 x positions)
        if verbose:
            print('\tWriting noise floor dataset')
        h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
                                   chunks=(1, num_actual_udvs_steps))

        """
        New Method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares
        of UDVS steps by pixels.  N is determined dynamically based on the dimensions of the
        dataset.  Currently it is set such that individual chunks are less than 10kB in size.

        Chris Smith -- csmith55@utk.edu
        """
        BEPS_chunks = calc_chunks([num_pix, tot_bins],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, bins_per_step))
        if verbose:
            print('\tHDF5 dataset will have chunks of size: {}'.format(BEPS_chunks))
            print('\tCreating empty main dataset')
        self.h5_raw = write_main_dataset(h5_chan_grp, (num_pix, tot_bins), 'Raw_Data', 'Piezoresponse', 'V', None, None,
                                         dtype=np.complex64, chunks=BEPS_chunks, compression='gzip',
                                         h5_pos_inds=h5_pos_ind, h5_pos_vals=h5_pos_val, h5_spec_inds=h5_spec_inds,
                                         h5_spec_vals=h5_spec_vals, verbose=verbose)

        if verbose:
            print('\tReading data from binary data files into raw HDF5')
        self._read_data(UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                        add_pix, verbose=verbose)

        if verbose:
            print('\tGenerating plot groups')
        generatePlotGroups(self.h5_raw, self.mean_resp, folder_path, basename,
                           self.max_resp, self.min_resp, max_mem_mb=self.max_ram,
                           spec_label=spec_label, show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=verbose)
        if verbose:
            print('\tUpgrading to USIDataset')
        self.h5_raw = USIDataset(self.h5_raw)

        # Go ahead and read the current data in the second (current) channel
        if current_data_exists:  # a matching AI2 'write' .dat file was found
            if verbose:
                print('\tReading data in secondary channels (current)')
            self._read_secondary_channel(h5_meas_group, aux_files)

        if verbose:
            print('\tClosing HDF5 file')
        h5_f.close()

        return h5_path

    def _read_data(self, UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                   add_pix, verbose=False):
        """
        Checks if the data is BEPS or BELine and calls the correct function to read the data from
        file

        Parameters
        ----------
        UDVS_mat : numpy.ndarray of float
            UDVS table
        parm_dict : dict
            Experimental parameters
        path_dict : dict
            Dictionary of data files to be read
        real_size : dict
            Size of each data file
        isBEPS : boolean
            Is the data BEPS
        add_pix : boolean
            Does the reader need to add extra pixels to the end of the dataset
        verbose : bool, optional. Default = False
            Whether or not to print logs

        Returns
        -------
        None
        """
        # Now read the raw data files:
        if not isBEPS:
            # Do this for all BE-Line (always small enough to read in one shot)
            if verbose:
                print('\t\tReading all raw data for BE-Line in one shot')
            self.__quick_read_data(path_dict['read_real'], path_dict['read_imag'], parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and parm_dict['VS_measure_in_field_loops'] == 'out-of-field':
            # Do this for out-of-field BEPS ONLY that is also small (256 MB)
            if verbose:
                print('\t\tReading all raw BEPS (out-of-field) data in one shot')
            self.__quick_read_data(path_dict['read_real'], path_dict['read_imag'], parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and parm_dict['VS_measure_in_field_loops'] == 'in-field':
            # Do this for in-field only
            if verbose:
                print('\t\tReading all raw BEPS (in-field only) data in one shot')
            self.__quick_read_data(path_dict['write_real'], path_dict['write_imag'], parm_dict['num_udvs_steps'])
        else:
            # Large BEPS datasets OR those with in-and-out of field
            if verbose:
                print('\t\tReading all raw data for in and out of field OR very large file')
            self.__read_beps_data(path_dict, UDVS_mat.shape[0], parm_dict['VS_measure_in_field_loops'], add_pix)
        self.h5_raw.file.flush()

    def __read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
        """
        Reads the imaginary and real data files pixelwise and writes to the H5 file 
        
        Parameters 
        --------------------
        path_dict : dictionary
            Dictionary containing the absolute paths of the real and imaginary data files
        udvs_steps : unsigned int
            Number of UDVS steps
        mode : String / Unicode
            'in-field', 'out-of-field', or 'in and out-of-field'
        add_pixel : boolean. (Optional; default is False)
            If an empty pixel worth of data should be written to the end             
        
        Returns 
        -------------------- 
        None
        """

        print('---- reading pixel-by-pixel ----------')

        bytes_per_pix = self.h5_raw.shape[1] * 4
        step_size = self.h5_raw.shape[1] / udvs_steps

        if mode == 'out-of-field':
            parsers = [BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in-field':
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in and out-of-field':
            # each file will only have half the udvs steps:
            if 0.5 * udvs_steps % 1:
                raise ValueError('Odd number of UDVS steps for in and out-of-field data')

            udvs_steps = int(0.5 * udvs_steps)
            # be careful - each pair contains only half the necessary bins - so read half
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2)),
                       BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2))]

            if step_size % 1:
                raise ValueError('Non-integer number of bins per UDVS step. Aborting.')

            step_size = int(step_size)

        rand_spectra = self.__get_random_spectra(parsers, self.h5_raw.shape[0], udvs_steps, step_size,
                                                 num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)

        self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
        self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
        self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)

        numpix = self.h5_raw.shape[0]
        """ 
        Don't try to do the last step if a pixel is missing.   
        This will be handled after the loop. 
        """
        if add_pixel:
            numpix -= 1

        for pix_indx in range(numpix):
            if self.h5_raw.shape[0] > 5:
                if pix_indx % int(round(self.h5_raw.shape[0] / 10)) == 0:
                    print('Reading... {} complete'.format(round(100 * pix_indx / self.h5_raw.shape[0])))

            # get the raw stream from each parser
            pxl_data = list()
            for prsr in parsers:
                pxl_data.append(prsr.read_pixel())

            # interleave if both in and out of field
            # we are ignoring user defined possibilities...
            if mode == 'in and out-of-field':
                in_fld = pxl_data[0]
                out_fld = pxl_data[1]

                in_fld_2 = in_fld.reshape(udvs_steps, step_size)
                out_fld_2 = out_fld.reshape(udvs_steps, step_size)
                raw_mat = np.empty((udvs_steps * 2, step_size), dtype=out_fld.dtype)
                raw_mat[0::2, :] = in_fld_2
                raw_mat[1::2, :] = out_fld_2
                raw_vec = raw_mat.reshape(in_fld.size + out_fld.size).transpose()
            else:
                raw_vec = pxl_data[0]  # only one parser
            self.max_resp[pix_indx] = np.max(np.abs(raw_vec))
            self.min_resp[pix_indx] = np.min(np.abs(raw_vec))
            self.mean_resp = (1 / (pix_indx + 1)) * (raw_vec + pix_indx * self.mean_resp)

            if take_conjugate:
                raw_vec = np.conjugate(raw_vec)
            self.h5_raw[pix_indx, :] = np.complex64(raw_vec[:])
            self.h5_raw.file.flush()

        # Add zeros to main_data for the missing pixel. 
        if add_pixel:
            self.h5_raw[-1, :] = 0 + 0j

        print('---- Finished reading files -----')

    def __quick_read_data(self, real_path, imag_path, udvs_steps):
        """
        Reads the entire real and imaginary data files in one shot and writes the data to the HDF5 dataset

        Parameters
        -----------
        real_path : String / Unicode
            Absolute file path of the real data file
        imag_path : String / Unicode
            Absolute file path of the imaginary data file
        udvs_steps : unsigned int
            Number of UDVS steps
        """
        print('---- reading all data at once ----------')

        parser = BEodfParser(real_path, imag_path, self.h5_raw.shape[0], self.h5_raw.shape[1] * 4)

        step_size = self.h5_raw.shape[1] / udvs_steps
        rand_spectra = self.__get_random_spectra([parser], self.h5_raw.shape[0], udvs_steps, step_size,
                                                 num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        raw_vec = parser.read_all_data()
        if take_conjugate:
            print('Taking conjugate to ensure positive Quality factors')
            raw_vec = np.conjugate(raw_vec)

        if raw_vec.size != np.prod(self.h5_raw.shape):
            percentage_padded = 100 * (np.prod(self.h5_raw.shape) - raw_vec.size) / np.prod(self.h5_raw.shape)
            print('Warning! Raw data length {} does not match placeholder length {}. '
                  'Padding zeros for {}% of the data!'.format(raw_vec.size, np.prod(self.h5_raw.shape), percentage_padded))

            padded_raw_vec = np.zeros(np.prod(self.h5_raw.shape), dtype=np.complex64)

            padded_raw_vec[:raw_vec.size] = raw_vec
            raw_mat = padded_raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])
        else:
            raw_mat = raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])


        # Write to the h5 dataset:
        self.mean_resp = np.mean(raw_mat, axis=0)
        self.max_resp = np.amax(np.abs(raw_mat), axis=0)
        self.min_resp = np.amin(np.abs(raw_mat), axis=0)
        self.h5_raw[:, :] = np.complex64(raw_mat)
        self.h5_raw.file.flush()

        print('---- Finished reading files -----')

    @staticmethod
    def _parse_file_path(data_filepath):
        """
        Returns the basename and a dictionary containing the absolute file paths for the
        real and imaginary data files and the text / .mat parameter files
        
        Parameters 
        --------------------
        data_filepath: String / Unicode
            Absolute path of any file in the same directory as the .dat files
        
        Returns 
        --------------------
        basename : String / Unicode
            Basename of the dataset      
        path_dict : Dictionary
            Dictionary containing absolute paths of all necessary data and parameter files
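
        Notes
        -----
        For illustration, a newer BEPS folder (hypothetical file names) could produce:
            {'parm_txt':   '.../BEPS_0001/BEPS_0001_parm.txt',
             'parm_mat':   '.../BEPS_0001/BEPS_0001_more_parms.mat',
             'read_real':  '.../BEPS_0001/BEPS_0001_read_real.dat',
             'read_imag':  '.../BEPS_0001/BEPS_0001_read_imag.dat',
             'write_real': '.../BEPS_0001/BEPS_0001_write_real.dat',
             'write_imag': '.../BEPS_0001/BEPS_0001_write_imag.dat'}
        BE-Line and older datasets typically only provide the 'read_*' pair and,
        for the oldest format, an 'old_mat_parms' entry.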
        """
        (folder_path, basename) = path.split(data_filepath)
        (super_folder, basename) = path.split(folder_path)

        if basename.endswith('_d') or basename.endswith('_c'):
            # Old old data format where the folder ended with a _d or _c to denote a completed spectroscopic run
            basename = basename[:-2]
        """
        A single pair of real and imaginary files are / were generated for:
            BE-Line and BEPS (compiled version only generated out-of-field or 'read')
        Two pairs of real and imaginary files were generated for later BEPS datasets
            These have 'read' and 'write' prefixes to denote out or in field respectively
        """
        path_dict = dict()

        for file_name in listdir(folder_path):
            abs_path = path.join(folder_path, file_name)
            if file_name.endswith('.txt') and file_name.find('parm') > 0:
                path_dict['parm_txt'] = abs_path
            elif file_name.find('.mat') > 0:
                if file_name.find('more_parms') > 0:
                    path_dict['parm_mat'] = abs_path
                elif file_name == (basename + '.mat'):
                    path_dict['old_mat_parms'] = abs_path
            elif file_name.endswith('.dat'):
                # Need to account for the second AI channel here
                file_tag = 'read'
                if file_name.find('write') > 0:
                    file_tag = 'write'
                if file_name.find('real') > 0:
                    file_tag += '_real'
                elif file_name.find('imag') > 0:
                    file_tag += '_imag'
                path_dict[file_tag] = abs_path

        return basename, path_dict

    def _read_secondary_channel(self, h5_meas_group, aux_file_path):
        """
        Reads secondary channel stored in AI .mat file
        Currently works for in-field measurements only, but should be updated to
        include both in and out of field measurements

        Parameters
        -----------
        h5_meas_group : h5 group
            Reference to the Measurement group
        aux_file_path : list of str / unicode
            Absolute file path(s) of the secondary channel data file(s).
        """
        print('---- Reading Secondary Channel  ----------')
        if len(aux_file_path)>1:
            print('Detected multiple files, assuming in and out of field')
            aux_file_paths = aux_file_path
        else:
            aux_file_paths = list(aux_file_path)

        freq_index = self.h5_raw.spec_dim_labels.index('Frequency')
        num_pix = self.h5_raw.shape[0]
        spectral_len = 1

        for i in range(len(self.h5_raw.spec_dim_sizes)):
            if i == freq_index:
                continue
            spectral_len = spectral_len * self.h5_raw.spec_dim_sizes[i]

        #num_forc_cycles = self.h5_raw.spec_dim_sizes[self.h5_raw.spec_dim_labels.index("FORC")]
        #num_dc_steps =  self.h5_raw.spec_dim_sizes[self.h5_raw.spec_dim_labels.index("DC_Offset")]

        # create a new channel
        h5_current_channel_group = create_indexed_group(h5_meas_group, 'Channel')

        # Copy attributes from the main channel
        copy_attributes(self.h5_raw.parent, h5_current_channel_group)

        # Modify attributes that are different
        write_simple_attrs(h5_current_channel_group, {'Channel_Input': 'IO_Analog_Input_2',
                                                      'channel_type': 'Current'}, verbose=True)

        #Get the reduced dimensions
        h5_current_spec_inds, h5_current_spec_values = write_reduced_anc_dsets(h5_current_channel_group,
                                                        self.h5_raw.h5_spec_inds,
                                                        self.h5_raw.h5_spec_vals, 'Frequency', is_spec=True)


        h5_current_main = write_main_dataset(h5_current_channel_group,  # parent HDF5 group
                                             (num_pix, spectral_len),  # shape of Main dataset
                                             'Raw_Data',  # Name of main dataset
                                             'Current',  # Physical quantity contained in Main dataset
                                             'nA',  # Units for the physical quantity
                                             None,  # Position dimensions
                                             None,  # Spectroscopic dimensions
                                             h5_pos_inds=self.h5_raw.h5_pos_inds,
                                             h5_pos_vals=self.h5_raw.h5_pos_vals,
                                             h5_spec_inds=h5_current_spec_inds,
                                             h5_spec_vals=h5_current_spec_values,
                                             dtype=np.float32,  # data type / precision
                                             main_dset_attrs={'IO_rate': 4E+6, 'Amplifier_Gain': 9})

        # Now calculate the number of positions that can be stored in memory in one go.
        b_per_position = np.float32(0).itemsize * spectral_len

        max_pos_per_read = int(np.floor((get_available_memory()) / b_per_position))

        # if self._verbose:
        print('Allowed to read {} pixels per chunk'.format(max_pos_per_read))

        #Open the read and write files and write them to the hdf5 file
        for aux_file in aux_file_paths:
            if 'write' in aux_file:
                infield = True
            else:
                infield=False

            cur_file = open(aux_file, "rb")

            start_pix = 0

            while start_pix < num_pix:
                end_pix = min(num_pix, start_pix + max_pos_per_read)

                # TODO: Fix for when it won't fit in memory.

                #if max_pos_per_read * b_per_position > num_pix * b_per_position:
                cur_data = np.frombuffer(cur_file.read(), dtype='f')
                #else:
                #cur_data = np.frombuffer(cur_file.read(max_pos_per_read * b_per_position), dtype='f')

                cur_data = cur_data.reshape(end_pix - start_pix, spectral_len//2)

                # Write to h5
                if infield:
                    h5_current_main[start_pix:end_pix, ::2] = cur_data
                else:
                    h5_current_main[start_pix:end_pix, 1::2] = cur_data
                start_pix = end_pix


    @staticmethod
    def __read_old_mat_be_vecs(file_path):
        """
        Returns information about the excitation BE waveform present in the 
        more parms.mat file
        
        Parameters 
        --------------------
        file_path : String or unicode
            Absolute filepath of the .mat parameter file
        
        Returns 
        --------------------
        bin_inds : 1D numpy unsigned int array
            Indices of the excited and measured frequency bins
        bin_w : 1D numpy float array
            Excitation bin Frequencies
        bin_FFT : 1D numpy complex array
            FFT of the BE waveform for the excited bins
        BE_wave : 1D numpy float array
            Band Excitation waveform
        dc_amp_vec_full : 1D numpy float array
            spectroscopic waveform. 
            This information will be necessary for fixing the UDVS for AC modulation for example
        """
        matread = loadmat(file_path, squeeze_me=True)
        BE_wave = matread['BE_wave']
        bin_inds = matread['bin_ind'] - 1  # Python base 0
        bin_w = matread['bin_w']
        dc_amp_vec_full = matread['dc_amp_vec_full']
        FFT_full = np.fft.fftshift(np.fft.fft(BE_wave))
        bin_FFT = np.conjugate(FFT_full[bin_inds])
        return bin_inds, bin_w, bin_FFT, BE_wave, dc_amp_vec_full

    @staticmethod
    def __infer_frac_phase(slopes):
        """
        Infers the VS cycle fraction and phase when parameters were
        stored in old mat files

        Parameters
        --------------------
        slopes : list / tuple
            Array of mean slopes of each fraction of a SINGLE cycle

        Returns
        --------------------
        tuple:
            fraction : float
                Fraction of VS cycle
            phase : float
                Phase offset for VS cycle
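
        Notes
        -----
        Worked examples of the mapping implemented below, where slopes holds the mean
        slope of each quarter of a single voltage cycle:
            all four slopes positive (e.g. + + + +)      -> (0.25, 0)
            first two positive, last two negative        -> (0.5, 0)
            up, down, down, up (full bipolar cycle)      -> (1, 0)
            any unrecognized pattern                     -> (0, 0)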
        """
        if all([_ > 0 for _ in slopes]):
            return 0.25, 0
        elif all([_ < 0 for _ in slopes]):
            return 0.25, 0.75
        elif all([_ > 0 for _ in slopes[:2]]) and all(
                [_ < 0 for _ in slopes[2:]]):
            return 0.5, 0
        elif all([_ < 0 for _ in slopes[:2]]) and all(
                [_ > 0 for _ in slopes[2:]]):
            return 0.5, 0.5
        elif all([_ > 0 for _ in slopes[:1]]) and all(
                [_ < 0 for _ in slopes[1:]]):
            return 0.75, 0
        elif all([_ > 0 for _ in slopes[:3]]) and all(
                [_ < 0 for _ in slopes[3:]]):
            return 0.75, 0.25
        elif all([_ < 0 for _ in slopes[:1]]) and all(
                [_ > 0 for _ in slopes[1:]]):
            return 0.75, 0.5
        elif all([_ < 0 for _ in slopes[:3]]) and all(
                [_ > 0 for _ in slopes[3:]]):
            return 0.75, 0.75
        elif slopes[0] > 0 and slopes[1] < 0 and slopes[2] < 0 and slopes[
            3] > 0:
            return 1, 0
        elif slopes[0] < 0 and slopes[1] > 0 and slopes[2] > 0 and slopes[
            3] < 0:
            return 1, 0.5
        else:
            return 0, 0

    @staticmethod
    def __get_parms_from_old_mat(file_path):
        """
        Formats parameters found in the old parameters .mat file into a dictionary
        as though the dataset had a parms.txt describing it

        Parameters
        --------------------
        file_path : Unicode / String
            absolute filepath of the .mat file containing the parameters

        Returns
        --------------------
        parm_dict : dictionary
            Parameters describing experiment
        """
        parm_dict = dict()
        matread = loadmat(file_path, squeeze_me=True)

        parm_dict['IO_rate'] = str(int(matread['AO_rate'] / 1E+6)) + ' MHz'

        position_vec = matread['position_vec']
        parm_dict['grid_current_row'] = position_vec[0]
        parm_dict['grid_current_col'] = position_vec[1]
        parm_dict['grid_num_rows'] = position_vec[2]
        parm_dict['grid_num_cols'] = position_vec[3]

        if position_vec[0] != position_vec[1] or position_vec[2] != \
                position_vec[3]:
            warn('WARNING: Incomplete dataset. Translation not guaranteed!')
            parm_dict['grid_num_rows'] = position_vec[
                0]  # set to number of present cols and rows
            parm_dict['grid_num_cols'] = position_vec[1]

        BE_parm_vec_1 = matread['BE_parm_vec_1']
        try:
            BE_parm_vec_2 = matread['BE_parm_vec_2']
        except KeyError:
            BE_parm_vec_2 = 'None'

        # Not required for translation but necessary to have
        if BE_parm_vec_1[0] == 3 or BE_parm_vec_2[0] == 3:
            parm_dict['BE_phase_content'] = 'chirp-sinc hybrid'
        else:
            parm_dict['BE_phase_content'] = 'Unknown'
        parm_dict['BE_center_frequency_[Hz]'] = BE_parm_vec_1[1]
        parm_dict['BE_band_width_[Hz]'] = BE_parm_vec_1[2]
        parm_dict['BE_amplitude_[V]'] = BE_parm_vec_1[3]
        parm_dict['BE_band_edge_smoothing_[s]'] = BE_parm_vec_1[
            4]  # 150 most likely
        parm_dict['BE_phase_variation'] = BE_parm_vec_1[5]  # 0.01 most likely
        parm_dict['BE_window_adjustment'] = BE_parm_vec_1[6]
        parm_dict['BE_points_per_step'] = 2 ** int(BE_parm_vec_1[7])
        parm_dict['BE_repeats'] = 2 ** int(BE_parm_vec_1[8])
        try:
            parm_dict['BE_bins_per_read'] = matread['bins_per_band_s']
        except KeyError:
            parm_dict['BE_bins_per_read'] = len(matread['bin_w'])