# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:12 2015

@author: Suhas Somnath, Stephen Jesse
"""

from __future__ import division, print_function, absolute_import, unicode_literals

from os import path, listdir, remove
import sys
import datetime
from warnings import warn
import h5py
import numpy as np
from scipy.io.matlab import loadmat  # To load parameters stored in Matlab .mat file

from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, \
    createSpecVals, requires_conjugate, generate_bipolar_triangular_waveform, \
    infer_bipolar_triangular_fraction_phase, nf32
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import INDICES_DTYPE, VALUES_DTYPE, Dimension, calc_chunks
from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, write_region_references, \
    create_indexed_group, write_simple_attrs, write_book_keeping_attrs, copy_attributes,\
    write_reduced_anc_dsets, get_unit_values
from pyUSID.io.usi_data import USIDataset
from pyUSID.processing.comp_utils import get_available_memory

if sys.version_info.major == 3:
    unicode = str


class BEodfTranslator(Translator):
    """
    Translates either the Band Excitation (BE) scan or Band Excitation 
    Polarization Switching (BEPS) data format from the old data format(s) to .h5
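
    Examples
    --------
    A minimal usage sketch; the data path below is purely hypothetical:

    >>> data_file = '/path/to/experiment/parm_file.txt'
    >>> if BEodfTranslator.is_valid_file(data_file) is not None:
    ...     h5_path = BEodfTranslator().translate(data_file, show_plots=False,
    ...                                           save_plots=False)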
    """

    def __init__(self, *args, **kwargs):
        super(BEodfTranslator, self).__init__(*args, **kwargs)
        self.h5_raw = None
        self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
        self._cores = kwargs.pop('cores', None)
        self.FFT_BE_wave = None
        self.signal_type = None
        self.expt_type = None

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to raw data file

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """
        if not isinstance(data_path, (str, unicode)):
            raise TypeError('data_path must be a string')

        ndf = 'newdataformat'

        data_path = path.abspath(data_path)

        if path.isfile(data_path):
            ext = data_path.split('.')[-1]
            if ext.lower() not in ['jpg', 'png', 'jpeg', 'tiff', 'mat', 'txt',
                                   'dat', 'xls', 'xlsx']:
                return None
            # we only care about the folder names at this point...
            data_path, _ = path.split(data_path)

        # Check if the data is in the new or old format:
        # Check one level up:
        _, dir_name = path.split(data_path)
        if dir_name == ndf:
            # Though this translator could also read these files, the NDF Translator is more robust...
            return None
        # Check one level down:
        if ndf in listdir(data_path):
            # Though this translator could also read these files, the NDF Translator is more robust...
            return None

        file_path = path.join(data_path, listdir(path=data_path)[0])

        _, path_dict = BEodfTranslator._parse_file_path(file_path)

        if any([x.find('bigtime_0') > 0 and x.endswith('.dat') for x in
                path_dict.values()]):
            # This is a G-mode Line experiment:
            return None

        parm_found = any([piece in path_dict.keys() for piece in
                          ['parm_txt', 'old_mat_parms']])
        real_found = any([piece in path_dict.keys() for piece in
                          ['read_real', 'write_real']])
        imag_found = any([piece in path_dict.keys() for piece in
                          ['read_imag', 'write_imag']])

        if parm_found and real_found and imag_found:
            if 'parm_txt' in path_dict.keys():
                return path_dict['parm_txt']
            else:
                return path_dict['old_mat_parms']
        else:
            return None

    def translate(self, file_path, show_plots=True, save_plots=True,
                  do_histogram=False, verbose=False):
        """
        Translates .dat data file(s) to a single .h5 file
        
        Parameters
        -------------
        file_path : String / Unicode
            Absolute file path for one of the data files. 
            It is assumed that this file is of the OLD data format.
        show_plots : (optional) Boolean
            Whether or not to show intermediate plots
        save_plots : (optional) Boolean
            Whether or not to save plots to disk
        do_histogram : (optional) Boolean
            Whether or not to construct histograms to visualize data quality. Note - this takes a fair amount of time
        verbose : (optional) Boolean
            Whether or not to print statements
            
        Returns
        ----------
        h5_path : String / Unicode
            Absolute path of the resultant .h5 file
        """
        self._verbose = verbose

        file_path = path.abspath(file_path)
        (folder_path, basename) = path.split(file_path)
        (basename, path_dict) = self._parse_file_path(file_path)

        h5_path = path.join(folder_path, basename + '.h5')
        tot_bins_multiplier = 1
        udvs_denom = 2

        if 'parm_txt' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from text file')
            isBEPS, parm_dict = parmsToDict(path_dict['parm_txt'])

            # Initial text files named some parameters differently
            if parm_dict['VS_mode'] == 'AC modulation mode':
                parm_dict['VS_mode'] = 'AC modulation mode with time reversal'
            if parm_dict['BE_phase_content'] == 'chirp':
                parm_dict['BE_phase_content'] = 'chirp-sinc hybrid'

        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from old mat file')
            parm_dict = self._get_parms_from_old_mat(path_dict['old_mat_parms'], verbose=self._verbose)
            if parm_dict['VS_steps_per_full_cycle'] == 0:
                isBEPS = False
            else:
                isBEPS = True
        else:
            raise FileNotFoundError('No parameters file found! Cannot translate this dataset!')

        if self._verbose:
            keys = list(parm_dict.keys())
            keys.sort()
            print('\tExperiment parameters:')
            for key in keys:
                print('\t\t{} : {}'.format(key, parm_dict[key]))

            print('\n\tisBEPS = {}'.format(isBEPS))

        ignored_plt_grps = []
        if isBEPS:
            parm_dict['data_type'] = 'BEPSData'

            field_mode = parm_dict['VS_measure_in_field_loops']
            std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'

            if not std_expt:
                raise ValueError('This translator does not handle user defined voltage spectroscopy')

            spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])

            if parm_dict['VS_mode'] in ['DC modulation mode', 'current mode']:
                if field_mode == 'in and out-of-field':
                    tot_bins_multiplier = 2
                    udvs_denom = 1
                else:
                    if field_mode == 'out-of-field':
                        ignored_plt_grps = ['in-field']
                    else:
                        ignored_plt_grps = ['out-of-field']
            else:
                tot_bins_multiplier = 1
                udvs_denom = 1

        else:
            spec_label = 'None'
            parm_dict['data_type'] = 'BELineData'

        # Check file sizes:
        if self._verbose:
            print('\tChecking sizes of real and imaginary data files')

        if 'read_real' in path_dict.keys():
            real_size = path.getsize(path_dict['read_real'])
            imag_size = path.getsize(path_dict['read_imag'])
        else:
            real_size = path.getsize(path_dict['write_real'])
            imag_size = path.getsize(path_dict['write_imag'])

        if real_size != imag_size:
            raise ValueError("Real and imaginary file sizes do not match!")

        if real_size == 0:
            raise ValueError('Real and imaginary files were empty')

        # Check here if a second channel for current is present
        # Look for the file containing the current data

        if self._verbose:
            print('\tLooking for secondary channels')
        file_names = listdir(folder_path)
        aux_files = []
        current_data_exists = False
        for fname in file_names:
            if 'AI2' in fname:
                if 'write' in fname:
                    current_file = path.join(folder_path, fname)
                    current_data_exists = True
                aux_files.append(path.join(folder_path, fname))

        add_pix = False
        num_rows = int(parm_dict['grid_num_rows'])
        num_cols = int(parm_dict['grid_num_cols'])
        if self._verbose:
            print('\tRows: {}, Cols: {}'.format(num_rows, num_cols))
        num_pix = num_rows * num_cols
        tot_bins = real_size / (num_pix * 4)
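        # Each raw binary file stores one float32 (4 bytes) per bin per pixel,
        # so the expected bin count is file size / (num_pix * 4). Worked example
        # with hypothetical numbers: a 1,048,576 byte file over 256 pixels
        # gives 1048576 / (256 * 4) = 1024 bins per pixel.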
        # Check for case where only a single pixel is missing.
        if num_pix == 1:
            check_bins = real_size / (num_pix * 4)
        else:
            check_bins = real_size / ((num_pix - 1) * 4)

        if self._verbose:
            print('\tChecking bins: Total: {}, actual: {}'.format(tot_bins,
                                                                  check_bins))

        if tot_bins % 1 and check_bins % 1:
            raise ValueError('Aborting! Some parameter appears to have '
                             'changed in-between')
        elif not tot_bins % 1:
            # Everything's ok
            pass
        elif not check_bins % 1:
            tot_bins = check_bins
            warn('Warning:  A pixel seems to be missing from the data. '
                 'File will be padded with zeros.')
            add_pix = True

        tot_bins = int(tot_bins) * tot_bins_multiplier

        if 'parm_mat' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from the parameters .mat file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm = self._read_parms_mat(path_dict['parm_mat'], isBEPS)
        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from the old .mat parameters file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec = self._read_old_mat_be_vecs(path_dict['old_mat_parms'], verbose=verbose)
        else:
            if self._verbose:
                print('\tGenerating dummy BE arrays')
            band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
            st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
            en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
            bin_freqs = np.linspace(st_f, en_f, tot_bins, dtype=np.float32)

            warn('No parms .mat file found.... Filling dummy values into ancillary datasets.')
            bin_inds = np.zeros(shape=tot_bins, dtype=np.int32)
            bin_FFT = np.zeros(shape=tot_bins, dtype=np.complex64)
            ex_wfm = np.zeros(shape=100, dtype=np.float32)

        # Forcing standardized datatypes:
        bin_inds = np.int32(bin_inds)
        bin_freqs = np.float32(bin_freqs)
        bin_FFT = np.complex64(bin_FFT)
        ex_wfm = np.float32(ex_wfm)

        self.FFT_BE_wave = bin_FFT

        if isBEPS:
            if self._verbose:
                print('\tBuilding UDVS table for BEPS')
            UDVS_labs, UDVS_units, UDVS_mat = self._build_udvs_table(parm_dict)

            if self._verbose:
                print('\tTrimming UDVS table to remove unused plot group columns')

            UDVS_mat, UDVS_labs, UDVS_units = trimUDVS(UDVS_mat, UDVS_labs, UDVS_units, ignored_plt_grps)

            old_spec_inds = np.zeros(shape=(2, tot_bins), dtype=INDICES_DTYPE)

            # Will assume that all excitation waveforms have same num of bins
            num_actual_udvs_steps = UDVS_mat.shape[0] / udvs_denom
            bins_per_step = tot_bins / num_actual_udvs_steps
            if self._verbose:
                print('\t# UDVS steps: {}, # bins/step: {}'
                      ''.format(num_actual_udvs_steps, bins_per_step))

            if bins_per_step % 1:
                print('UDVS mat shape: {}, total bins: {}, bins per step: {}'.format(UDVS_mat.shape, tot_bins,
                                                                                     bins_per_step))
                raise ValueError('Non-integer number of bins per step!')

            bins_per_step = int(bins_per_step)
            num_actual_udvs_steps = int(num_actual_udvs_steps)

            stind = 0
            for step_index in range(UDVS_mat.shape[0]):
                if UDVS_mat[step_index, 2] < 1E-3:  # invalid AC amplitude
                    continue
                # Bin step
                old_spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=INDICES_DTYPE)
                # UDVS step
                old_spec_inds[1, stind:stind + bins_per_step] = step_index * np.ones(bins_per_step, dtype=INDICES_DTYPE)
                stind += bins_per_step
            del stind, step_index

        else:  # BE Line
            if self._verbose:
                print('\tPreparing supporting variables for BE-Line')
            self.signal_type = 1
            self.expt_type = 1  # Stephen has not used this index for some reason
            num_actual_udvs_steps = 1
            bins_per_step = tot_bins
            UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
            UDVS_units = ['', 'V', 'A', '', '', '']
            UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
                                dtype=np.float32).reshape(1, len(UDVS_labs))

            old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
                                       np.zeros(tot_bins, dtype=INDICES_DTYPE)))

        # Some very basic information that can help the processing / analysis crew
        parm_dict['num_bins'] = tot_bins
        parm_dict['num_pix'] = num_pix
        parm_dict['num_udvs_steps'] = num_actual_udvs_steps
        parm_dict['num_steps'] = num_actual_udvs_steps

        if self._verbose:
            print('\tPreparing UDVS slices for region references')
        udvs_slices = dict()
        for col_ind, col_name in enumerate(UDVS_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))

        # Need to add the Bin Waveform type - infer from UDVS        
        exec_bin_vec = self.signal_type * np.ones(len(bin_inds), dtype=np.int32)
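        # Every bin is tagged with the signal type here; if expt_type == 2, the
        # BE vectors are doubled below and the second half is tagged with the
        # negated signal type.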

        if self.expt_type == 2:
            if self._verbose:
                print('\tExperiment type = 2. Doubling BE vectors')
            exec_bin_vec = np.hstack((exec_bin_vec, -1 * exec_bin_vec))
            bin_inds = np.hstack((bin_inds, bin_inds))
            bin_freqs = np.hstack((bin_freqs, bin_freqs))
            # This is wrong but I don't know what else to do
            bin_FFT = np.hstack((bin_FFT, bin_FFT))

        # Create Spectroscopic Values and Spectroscopic Values Labels datasets
        # This is an old and legacy way of doing things. Ideally, all we would need to do is just get the unit values
        if self._verbose:
            print('\tCalculating spectroscopic values')
        ret_vals = createSpecVals(UDVS_mat, old_spec_inds, bin_freqs,
                                  exec_bin_vec, parm_dict, UDVS_labs,
                                  UDVS_units, verbose=verbose)
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = ret_vals

        if self._verbose:
            print('\t\tspec_vals_labs: {}'.format(spec_vals_labs))
            unit_vals = get_unit_values(spec_inds, spec_vals,
                                        all_dim_names=spec_vals_labs,
                                        is_spec=True, verbose=False)
            print('\tUnit spectroscopic values')
            for key, val in unit_vals.items():
                print('\t\t{} : length: {}, values:\n\t\t\t{}'.format(key, len(val), val))

        if spec_inds.shape[1] != tot_bins:
            raise ValueError('Second axis of spectroscopic indices: {} not '
                             'matching with second axis of the expected main '
                             'dataset: {}'.format(spec_inds.shape, tot_bins))

        # Not sure what is happening here but this should work.
        spec_dim_dict = dict()
        for entry in spec_vals_labs_names:
            spec_dim_dict[entry[0] + '_parameters'] = entry[1]

        spec_vals_slices = dict()

        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind, row_ind + 1), slice(None))

        if path.exists(h5_path):
            if self._verbose:
                print('\tRemoving existing / old translated file: ' + h5_path)
            remove(h5_path)

        # First create the file
        h5_f = h5py.File(h5_path, mode='w')

        # Then write root level attributes
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        try:
            global_parms['experiment_date'] = parm_dict['File_date_and_time']
        except KeyError:
            global_parms['experiment_date'] = '1:1:1'

        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'ODF'
        if self._verbose:
            print('\tWriting attributes to HDF5 file root')
        write_simple_attrs(h5_f, global_parms)
        write_book_keeping_attrs(h5_f)

        # Then create the measurement group
        h5_meas_group = create_indexed_group(h5_f, 'Measurement')

        # Write attributes at the measurement group level
        if self._verbose:
            print('\twriting attributes to Measurement group')
        write_simple_attrs(h5_meas_group, parm_dict)

        # Create the Channel group
        h5_chan_grp = create_indexed_group(h5_meas_group, 'Channel')

        # Write channel group attributes
        write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1',
                                         'channel_type': 'BE'})

        # Now the datasets!
        if self._verbose:
            print('\tCreating ancillary datasets')
        h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)

        h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
        # TODO: Avoid using region references in USID
        write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=self._verbose)
        write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=False)

        h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])

        h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
                                   dtype=INDICES_DTYPE)

        h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
        h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
        h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)

        if self._verbose:
            print('\tWriting Position datasets')

        pos_dims = [Dimension('X', 'm', np.arange(num_cols)),
                    Dimension('Y', 'm', np.arange(num_rows))]
        h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=self._verbose)
        if self._verbose:
            print('\tPosition datasets of shape: {}'.format(h5_pos_ind.shape))

        if self._verbose:
            print('\tWriting Spectroscopic datasets of shape: {}'.format(spec_inds.shape))
        h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=INDICES_DTYPE)        
        h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=np.array(spec_vals), dtype=VALUES_DTYPE)
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_region_references(dset, spec_vals_slices, add_labels_attr=True, verbose=self._verbose)
            write_simple_attrs(dset, {'units': spec_vals_units}, verbose=False)
            write_simple_attrs(dset, spec_dim_dict)

        # Noise floor should be of shape: (udvs_steps x 3 x positions)
        if self._verbose:
            print('\tWriting noise floor dataset')
        h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
                                   chunks=(1, num_actual_udvs_steps))

        """
        New Method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares
        of UDVS steps by pixels.  N is determined dynamically based on the dimensions of the
        dataset.  Currently it is set such that individual chunks are less than 10kB in size.

        Chris Smith -- csmith55@utk.edu
        """
        BEPS_chunks = calc_chunks([num_pix, tot_bins],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, bins_per_step))
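        # Rough sizing sketch with hypothetical numbers: complex64 samples are
        # 8 bytes each, so a unit chunk of (1 pixel x 64 bins) would occupy
        # 512 bytes, and calc_chunks can tile several such unit chunks together
        # while staying under the ~10 kB target mentioned above.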
        if self._verbose:
            print('\tHDF5 dataset will have chunks of size: {}'.format(BEPS_chunks))
            print('\tCreating empty main dataset of shape: ({}, {})'.format(num_pix, tot_bins))
        self.h5_raw = write_main_dataset(h5_chan_grp, (num_pix, tot_bins), 'Raw_Data', 'Piezoresponse', 'V', None, None,
                                         dtype=np.complex64, chunks=BEPS_chunks, compression='gzip',
                                         h5_pos_inds=h5_pos_ind, h5_pos_vals=h5_pos_val, h5_spec_inds=h5_spec_inds,
                                         h5_spec_vals=h5_spec_vals, verbose=self._verbose)

        if self._verbose:
            print('\tReading data from binary data files into raw HDF5')
        self._read_data(UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                        add_pix)

        if self._verbose:
            print('\tGenerating plot groups')
        generatePlotGroups(self.h5_raw, self.mean_resp, folder_path, basename,
                           self.max_resp, self.min_resp, max_mem_mb=self.max_ram,
                           spec_label=spec_label, show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=self._verbose)
        if self._verbose:
            print('\tUpgrading to USIDataset')
        self.h5_raw = USIDataset(self.h5_raw)

        # Go ahead and read the current data in the second (current) channel
        if current_data_exists:  # a matching current-channel (AI2) .dat file was found
            if self._verbose:
                print('\tReading data in secondary channels (current)')
            self._read_secondary_channel(h5_meas_group, aux_files)

        if self._verbose:
            print('\tClosing HDF5 file')
        h5_f.close()

        return h5_path

    def _read_data(self, UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                   add_pix):
        """
        Checks if the data is BEPS or BELine and calls the correct function to read the data from
        file

        Parameters
        ----------
        UDVS_mat : numpy.ndarray of float
            UDVS table
        parm_dict : dict
            Experimental parameters
        path_dict : dict
            Dictionary of data files to be read
        real_size : dict
            Size of each data file
        isBEPS : boolean
            Is the data BEPS
        add_pix : boolean
            Does the reader need to add extra pixels to the end of the dataset

        Returns
        -------
        None
        """
        # Now read the raw data files:
        if not isBEPS:
            # Do this for all BE-Line (always small enough to read in one shot)
            if self._verbose:
                print('\t\tReading all raw data for BE-Line in one shot')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'out-of-field':
            # Do this for out-of-field BEPS ONLY that is also small (256 MB)
            if self._verbose:
                print('\t\tReading all raw BEPS (out-of-field) data at once')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'in-field':
            # Do this for in-field only
            if self._verbose:
                print('\t\tReading all raw BEPS (in-field only) data at once')
            self._quick_read_data(path_dict['write_real'],
                                  path_dict['write_imag'],
                                  parm_dict['num_udvs_steps'])
        else:
            # Large BEPS datasets OR those with in-and-out of field
            if self._verbose:
                print('\t\tReading all raw data for in-and-out-of-field OR '
                      'very large file one pixel at a time')
            self._read_beps_data(path_dict, UDVS_mat.shape[0],
                                 parm_dict['VS_measure_in_field_loops'],
                                 add_pix)
        self.h5_raw.file.flush()

    def _read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
        """
        Reads the imaginary and real data files pixelwise and writes to the H5 file 
        
        Parameters 
        --------------------
        path_dict : dictionary
            Dictionary containing the absolute paths of the real and imaginary data files
        udvs_steps : unsigned int
            Number of UDVS steps
        mode : String / Unicode
            'in-field', 'out-of-field', or 'in and out-of-field'
        add_pixel : boolean. (Optional; default is False)
            If an empty pixel worth of data should be written to the end             
        
        Returns 
        -------------------- 
        None
        """

        print('---- reading pixel-by-pixel ----------')

        bytes_per_pix = self.h5_raw.shape[1] * 4
        step_size = self.h5_raw.shape[1] / udvs_steps
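        # Each of the real and imaginary files holds one float32 (4 bytes) per
        # bin per pixel, hence shape[1] * 4 bytes per pixel per file; step_size
        # is the number of bins belonging to a single UDVS step.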

        if mode == 'out-of-field':
            parsers = [BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in-field':
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in and out-of-field':
            # each file will only have half the udvs steps:
            if 0.5 * udvs_steps % 1:
                raise ValueError('Odd number of UDVS steps')

            udvs_steps = int(0.5 * udvs_steps)
            # be careful - each pair contains only half the necessary bins - so read half
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2)),
                       BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2))]

            if step_size % 1:
                raise ValueError('Non-integer number of bins per UDVS step')

            step_size = int(step_size)

        rand_spectra = self._get_random_spectra(parsers, self.h5_raw.shape[0], udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
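        # requires_conjugate inspects the randomly sampled spectra to decide
        # whether the raw data must be conjugated so that quality factors come
        # out positive (see the corresponding message in _quick_read_data).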

        self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
        self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
        self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)

        numpix = self.h5_raw.shape[0]
        """ 
        Don't try to do the last step if a pixel is missing.   
        This will be handled after the loop. 
        """
        if add_pixel:
            numpix -= 1

        for pix_indx in range(numpix):
            if self.h5_raw.shape[0] > 5:
                if pix_indx % int(round(self.h5_raw.shape[0] / 10)) == 0:
                    print('Reading... {}% complete'.format(round(100 * pix_indx / self.h5_raw.shape[0])))

            # get the raw stream from each parser
            pxl_data = list()
            for prsr in parsers:
                pxl_data.append(prsr.read_pixel())

            # interleave if both in and out of field
            # we are ignoring user defined possibilities...
            if mode == 'in and out-of-field':
                in_fld = pxl_data[0]
                out_fld = pxl_data[1]

                in_fld_2 = in_fld.reshape(udvs_steps, step_size)
                out_fld_2 = out_fld.reshape(udvs_steps, step_size)
                raw_mat = np.empty((udvs_steps * 2, step_size), dtype=out_fld.dtype)
                raw_mat[0::2, :] = in_fld_2
                raw_mat[1::2, :] = out_fld_2
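                # The resulting rows alternate per UDVS step: rows 0, 2, 4, ...
                # hold the in-field steps and rows 1, 3, 5, ... the
                # out-of-field steps, before flattening back to a vector.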
                raw_vec = raw_mat.reshape(in_fld.size + out_fld.size).transpose()
            else:
                raw_vec = pxl_data[0]  # only one parser
            self.max_resp[pix_indx] = np.max(np.abs(raw_vec))
            self.min_resp[pix_indx] = np.min(np.abs(raw_vec))
            self.mean_resp = (1 / (pix_indx + 1)) * (raw_vec + pix_indx * self.mean_resp)
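            # Running mean over pixels: mean_n = (x_n + (n - 1) * mean_{n-1}) / n
            # with n = pix_indx + 1, so the entire dataset never needs to be
            # held in memory at once.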

            if take_conjugate:
                raw_vec = np.conjugate(raw_vec)
            self.h5_raw[pix_indx, :] = np.complex64(raw_vec[:])
            self.h5_raw.file.flush()

        # Add zeros to main_data for the missing pixel. 
        if add_pixel:
            self.h5_raw[-1, :] = 0 + 0j

        print('---- Finished reading files -----')

    def _quick_read_data(self, real_path, imag_path, udvs_steps):
        """
        Reads the real and imaginary data files in one shot and writes the
        data to the main Raw_Data HDF5 dataset, conjugating it first if needed

        Parameters
        -----------
        real_path : String / Unicode
            Absolute file path of the real data file
        imag_path : String / Unicode
            Absolute file path of the imaginary data file
        udvs_steps : unsigned int
            Number of UDVS steps
        """
        parser = BEodfParser(real_path, imag_path, self.h5_raw.shape[0],
                             self.h5_raw.shape[1] * 4)

        step_size = self.h5_raw.shape[1] / udvs_steps
        rand_spectra = self._get_random_spectra([parser],
                                                self.h5_raw.shape[0],
                                                udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra,
                                                verbose=self._verbose)
        if self._verbose:
            print('\t\t\tChecking if conjugate is required')
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        raw_vec = parser.read_all_data()
        if take_conjugate:
            if self._verbose:
                print('\t'*4 + 'Taking conjugate for positive quality factors')
            raw_vec = np.conjugate(raw_vec)
Unknown's avatar
Unknown committed
737

        if raw_vec.size != np.prod(self.h5_raw.shape):
            percentage_padded = 100 * (np.prod(self.h5_raw.shape) - raw_vec.size) / np.prod(self.h5_raw.shape)
            warn('Warning! Raw data length {} is not matching placeholder length {}. '
                  'Padding zeros for {}% of the data!'.format(raw_vec.shape, np.prod(self.h5_raw.shape), percentage_padded))

            padded_raw_vec = np.zeros(np.prod(self.h5_raw.shape), dtype=np.complex64)

            padded_raw_vec[:raw_vec.shape[0]] = raw_vec
            raw_mat = padded_raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])
        else:
            raw_mat = raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])

        # Write to the h5 dataset:
        self.mean_resp = np.mean(raw_mat, axis=0)
        self.max_resp = np.amax(np.abs(raw_mat), axis=0)
        self.min_resp = np.amin(np.abs(raw_mat), axis=0)
        self.h5_raw[:, :] = np.complex64(raw_mat)
        self.h5_raw.file.flush()

        print('---- Finished reading files -----')

    @staticmethod
    def _parse_file_path(data_filepath):
        """
        Returns the basename and a dictionary containing the absolute paths of the
        real and imaginary data files and the text / .mat parameter files
        
        Parameters 
        --------------------
        data_filepath: String / Unicode
            Absolute path of any file in the same directory as the .dat files
        
        Returns 
        --------------------
        basename : String / Unicode
            Basename of the dataset      
        path_dict : Dictionary
            Dictionary containing absolute paths of all necessary data and parameter files
        """
        (folder_path, basename) = path.split(data_filepath)
        (super_folder, basename) = path.split(folder_path)

        if basename.endswith('_d') or basename.endswith('_c'):
            # Old old data format where the folder ended with a _d or _c to denote a completed spectroscopic run
            basename = basename[:-2]
        """
        A single pair of real and imaginary files are / were generated for:
            BE-Line and BEPS (compiled version only generated out-of-field or 'read')
        Two pairs of real and imaginary files were generated for later BEPS datasets
            These have 'read' and 'write' prefixes to denote out or in field respectively
        """
        path_dict = dict()
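        # Hypothetical illustration of the keys built below (actual file names
        # vary with the acquisition software): a .dat file whose name contains
        # 'write' and 'real' is stored under 'write_real' (in-field, real
        # part), while one containing 'imag' but not 'write' is stored under
        # 'read_imag' (out-of-field, imaginary part).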

        for file_name in listdir(folder_path):
            abs_path = path.join(folder_path, file_name)
            if file_name.endswith('.txt') and file_name.find('parm') > 0:
                path_dict['parm_txt'] = abs_path
            elif file_name.find('.mat') > 0:
                if file_name.find('more_parms') > 0:
                    path_dict['parm_mat'] = abs_path
                elif file_name == (basename + '.mat'):
                    path_dict['old_mat_parms'] = abs_path
            elif file_name.endswith('.dat'):
                # Need to account for the second AI channel here
                file_tag = 'read'
                if file_name.find('write') > 0:
                    file_tag = 'write'
                if file_name.find('real') > 0:
                    file_tag += '_real'
                elif file_name.find('imag') > 0:
                    file_tag += '_imag'
                path_dict[file_tag] = abs_path

        return basename, path_dict

    def _read_secondary_channel(self, h5_meas_group, aux_file_path):
        """
        Reads secondary channel stored in AI .mat file
        Currently works for in-field measurements only, but should be updated to
        include both in and out of field measurements

        Parameters
        -----------
        h5_meas_group : h5 group
            Reference to the Measurement group
        aux_file_path : String / Unicode
            Absolute file path of the secondary channel file.
        """
        if self._verbose:
            print('\t---------- Reading Secondary Channel  ----------')
        if isinstance(aux_file_path, (list, tuple)):
            aux_file_paths = aux_file_path
        else:
            aux_file_paths = [aux_file_path]

        is_in_out_field = 'Field' in self.h5_raw.spec_dim_labels

        if not is_in_out_field and len(aux_file_paths) > 1:
            # TODO: Find a better way to handle this
            warn('\t\tField was not varied but found more than one file for '
                 'secondary channel: {}.\n\t\tResults will be overwritten'
                 ''.format([path.split(item)[-1] for item in aux_file_paths]))
        elif is_in_out_field and len(aux_file_paths) == 1:
            warn('\t\tField was varied but only one data file for secondary '
                 'channel was found. Half the data will be zeros')

        spectral_len = 1
        for dim_name, dim_size in zip(self.h5_raw.spec_dim_labels,
                                      self.h5_raw.spec_dim_sizes):
            if dim_name == 'Frequency':
                continue
            spectral_len = spectral_len * dim_size
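        # spectral_len is the product of all spectroscopic dimension sizes
        # except Frequency, i.e. the number of spectroscopic steps per
        # position in the secondary channel.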

        num_pix = self.h5_raw.shape[0]
ssomnath's avatar
ssomnath committed
852
        if self._verbose: