# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:12 2015

@author: Suhas Somnath, Stephen Jesse
"""

from __future__ import division, print_function, absolute_import, unicode_literals

from os import path, listdir, remove
import sys
import datetime
from warnings import warn
import h5py

import numpy as np
from scipy.io.matlab import loadmat  # To load parameters stored in Matlab .mat file

from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, \
    createSpecVals, requires_conjugate, generate_bipolar_triangular_waveform, \
    infer_bipolar_triangular_fraction_phase, nf32
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import INDICES_DTYPE, VALUES_DTYPE, Dimension, calc_chunks
from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, write_region_references, \
    create_indexed_group, write_simple_attrs, write_book_keeping_attrs, copy_attributes,\
    write_reduced_anc_dsets, get_unit_values
from pyUSID.io.usi_data import USIDataset
from pyUSID.processing.comp_utils import get_available_memory

if sys.version_info.major == 3:
    unicode = str


class BEodfTranslator(Translator):
    """
    Translates either the Band Excitation (BE) scan or Band Excitation 
    Polarization Switching (BEPS) data format from the old data format(s) to .h5
    """

    def __init__(self, *args, **kwargs):
        super(BEodfTranslator, self).__init__(*args, **kwargs)
        self.h5_raw = None
        self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
        self._cores = kwargs.pop('cores', None)
        self.FFT_BE_wave = None
        self.signal_type = None
        self.expt_type = None
        self._verbose = False

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to raw data file

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """
        if not isinstance(data_path, (str, unicode)):
            raise TypeError('data_path must be a string')

        ndf = 'newdataformat'

        data_path = path.abspath(data_path)

        if path.isfile(data_path):
            ext = data_path.split('.')[-1]
            if ext.lower() not in ['jpg', 'png', 'jpeg', 'tiff', 'mat', 'txt',
                                   'dat', 'xls', 'xlsx']:
                return None
            # we only care about the folder names at this point...
            data_path, _ = path.split(data_path)

        # Check if the data is in the new or old format:
        # Check one level up:
        _, dir_name = path.split(data_path)
        if dir_name == ndf:
            # This translator could also read these files, but the NDF Translator is more robust
            return None
        # Check one level down:
        if ndf in listdir(data_path):
            # This translator could also read these files, but the NDF Translator is more robust
            return None

        file_path = path.join(data_path, listdir(path=data_path)[0])

        _, path_dict = BEodfTranslator._parse_file_path(file_path)

        if any([x.find('bigtime_0') > 0 and x.endswith('.dat') for x in
                path_dict.values()]):
            # This is a G-mode Line experiment:
            return None

        parm_found = any([piece in path_dict.keys() for piece in
                          ['parm_txt', 'old_mat_parms']])
        real_found = any([piece in path_dict.keys() for piece in
                          ['read_real', 'write_real']])
        imag_found = any([piece in path_dict.keys() for piece in
                          ['read_imag', 'write_imag']])

        if parm_found and real_found and imag_found:
            if 'parm_txt' in path_dict.keys():
                return path_dict['parm_txt']
            else:
                return path_dict['old_mat_parms']
        else:
            return None

    def translate(self, file_path, show_plots=True, save_plots=True,
                  do_histogram=False, verbose=False):
        """
        Translates .dat data file(s) to a single .h5 file

        Parameters
        ----------
        file_path : String / Unicode
            Absolute file path for one of the data files.
            It is assumed that this file is of the OLD data format.
        show_plots : (optional) Boolean
            Whether or not to show intermediate plots
        save_plots : (optional) Boolean
            Whether or not to save plots to disk
        do_histogram : (optional) Boolean
            Whether or not to construct histograms to visualize data quality.
            Note - this takes a fair amount of time
        verbose : (optional) Boolean
            Whether or not to print statements

        Returns
        -------
        h5_path : String / Unicode
            Absolute path of the resultant .h5 file
        """
        self._verbose = verbose

        file_path = path.abspath(file_path)
        (folder_path, basename) = path.split(file_path)
        (basename, path_dict) = self._parse_file_path(file_path)

        h5_path = path.join(folder_path, basename + '.h5')
        tot_bins_multiplier = 1
        udvs_denom = 2

        if 'parm_txt' in path_dict.keys():
ssomnath's avatar
ssomnath committed
154
            if self._verbose:
155
                print('\treading parameters from text file')
ssomnath's avatar
ssomnath committed
156
157
            isBEPS, parm_dict = parmsToDict(path_dict['parm_txt'])

Somnath, Suhas's avatar
Somnath, Suhas committed
158
        elif 'old_mat_parms' in path_dict.keys():
ssomnath's avatar
ssomnath committed
159
            if self._verbose:
160
                print('\treading parameters from old mat file')
ssomnath's avatar
ssomnath committed
161
            parm_dict = self._get_parms_from_old_mat(path_dict['old_mat_parms'], verbose=self._verbose)
162
163
164
165
            if parm_dict['VS_steps_per_full_cycle'] == 0:
                isBEPS=False
            else:
                isBEPS=True
Somnath, Suhas's avatar
Somnath, Suhas committed
166
        else:
ssomnath's avatar
ssomnath committed
167
168
            raise FileNotFoundError('No parameters file found! Cannot '
                                    'translate this dataset!')
169

170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
        # Initial text / mat files named some parameters differently
        if parm_dict['VS_mode'] == 'AC modulation mode':
            warn('Updating parameter "VS_mode" from invalid value'
                 ' of "AC modulation mode" to "AC modulation mode with '
                 'time reversal"')
            parm_dict['VS_mode'] = 'AC modulation mode with time reversal'
        if parm_dict['BE_phase_content'] == 'chirp':
            warn('Updating parameter "BE_phase_content" from older value'
                 ' of "chirp" to "chirp-sinc hybrid"')
            parm_dict['BE_phase_content'] = 'chirp-sinc hybrid'
        if parm_dict['BE_amplitude_[V]'] < 1E-2:
            new_val = 0.5151
            warn('Updating parameter "BE_amplitude_[V]" from invalid value'
                 ' of {} to {}'.format(parm_dict['BE_amplitude_[V]'],
                                       new_val))
            parm_dict['BE_amplitude_[V]'] = new_val
        if 'VS_amplitude_[V]' in parm_dict.keys():
            if parm_dict['VS_amplitude_[V]'] < 1E-2:
                new_val = 1
                warn('Updating parameter "VS_amplitude_[V]" from invalid value'
                     ' of {} to {}'.format(parm_dict['VS_amplitude_[V]'],
                                           new_val))
                parm_dict['VS_amplitude_[V]'] = new_val

        if self._verbose:
            keys = list(parm_dict.keys())
            keys.sort()
            print('\tExperiment parameters:')
            for key in keys:
                print('\t\t{} : {}'.format(key, parm_dict[key]))

            print('\n\tisBEPS = {}'.format(isBEPS))

        ignored_plt_grps = []
        if isBEPS:
            parm_dict['data_type'] = 'BEPSData'

            field_mode = parm_dict['VS_measure_in_field_loops']
            std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'

            if not std_expt:
                raise ValueError('This translator does not handle user defined voltage spectroscopy')

            spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])

            if parm_dict['VS_mode'] in ['DC modulation mode', 'current mode']:
                if field_mode == 'in and out-of-field':
                    tot_bins_multiplier = 2
                    udvs_denom = 1
                else:
                    if field_mode == 'out-of-field':
                        ignored_plt_grps = ['in-field']
                    else:
                        ignored_plt_grps = ['out-of-field']
            else:
                tot_bins_multiplier = 1
                udvs_denom = 1
        else:
            spec_label = 'None'
            parm_dict['data_type'] = 'BELineData'

        # Check file sizes:
        if self._verbose:
            print('\tChecking sizes of real and imaginary data files')

        if 'read_real' in path_dict.keys():
            real_size = path.getsize(path_dict['read_real'])
            imag_size = path.getsize(path_dict['read_imag'])
        else:
            real_size = path.getsize(path_dict['write_real'])
            imag_size = path.getsize(path_dict['write_imag'])

        if real_size != imag_size:
            raise ValueError("Real and imaginary file sizes do not match!")

        if real_size == 0:
            raise ValueError('Real and imaginary files were empty')

        # Check here if a second channel for current is present
        # Look for the file containing the current data

        if self._verbose:
            print('\tLooking for secondary channels')
        file_names = listdir(folder_path)
        aux_files = []
        current_data_exists = False
        for fname in file_names:
            if 'AI2' in fname:
                if 'write' in fname:
                    current_file = path.join(folder_path, fname)
                    current_data_exists=True
                aux_files.append(path.join(folder_path, fname))

        add_pix = False
        num_rows = int(parm_dict['grid_num_rows'])
        num_cols = int(parm_dict['grid_num_cols'])
        if self._verbose:
            print('\tRows: {}, Cols: {}'.format(num_rows, num_cols))
        num_pix = num_rows * num_cols
        tot_bins = real_size / (num_pix * 4)
        # Check for case where only a single pixel is missing.
        if num_pix == 1:
            check_bins = real_size / (num_pix * 4)
        else:
            check_bins = real_size / ((num_pix - 1) * 4)

        if self._verbose:
            print('\tChecking bins: Total: {}, actual: {}'.format(tot_bins,
                                                                  check_bins))

        if tot_bins % 1 and check_bins % 1:
            raise ValueError('Aborting! Some parameter appears to have '
                             'changed in-between')
        elif not tot_bins % 1:
            # Everything's ok
            pass
        elif not check_bins % 1:
            tot_bins = check_bins
            warn('Warning:  A pixel seems to be missing from the data. '
                 'File will be padded with zeros.')
            add_pix = True

        tot_bins = int(tot_bins) * tot_bins_multiplier

        if isBEPS:
            if self._verbose:
                print('\tBuilding UDVS table for BEPS')
            UDVS_labs, UDVS_units, UDVS_mat = self._build_udvs_table(parm_dict)

            if self._verbose:
                print('\tTrimming UDVS table to remove unused plot group columns')

            UDVS_mat, UDVS_labs, UDVS_units = trimUDVS(UDVS_mat, UDVS_labs, UDVS_units, ignored_plt_grps)

            old_spec_inds = np.zeros(shape=(2, tot_bins), dtype=INDICES_DTYPE)

            # Will assume that all excitation waveforms have same num of bins
            num_actual_udvs_steps = UDVS_mat.shape[0] / udvs_denom
            bins_per_step = tot_bins / num_actual_udvs_steps
            if self._verbose:
                print('\t# UDVS steps: {}, # bins/step: {}'
                      ''.format(num_actual_udvs_steps, bins_per_step))

            if bins_per_step % 1:
                print('UDVS mat shape: {}, total bins: {}, bins per step: {}'.format(UDVS_mat.shape, tot_bins,
                                                                                     bins_per_step))
                raise ValueError('Non integer number of bins per step!')

            bins_per_step = int(bins_per_step)
            num_actual_udvs_steps = int(num_actual_udvs_steps)

            if len(np.unique(UDVS_mat[:, 2])) == 0:
                raise ValueError('No non-zero rows in AC amplitude')

            stind = 0
            for step_index in range(UDVS_mat.shape[0]):
                if UDVS_mat[step_index, 2] < 1E-3:  # invalid AC amplitude
                    continue
                # Bin step
                old_spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=INDICES_DTYPE)
                # UDVS step
                old_spec_inds[1, stind:stind + bins_per_step] = step_index * np.ones(bins_per_step, dtype=INDICES_DTYPE)
                stind += bins_per_step
            del stind, step_index
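
            # Illustration (assumed example): with 2 valid UDVS steps (none
            # skipped) and 3 bins per step, old_spec_inds would read
            #     [[0, 1, 2, 0, 1, 2],      <- bin index within each step
            #      [0, 0, 0, 1, 1, 1]]      <- UDVS step index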

        else:  # BE Line
            if self._verbose:
                print('\tPreparing supporting variables since BE-Line')
            self.signal_type = 1
            self.expt_type = 1  # Stephen has not used this index for some reason
            num_actual_udvs_steps = 1
            bins_per_step = tot_bins
            UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
            UDVS_units = ['', 'V', 'A', '', '', '']
            UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
                                dtype=np.float32).reshape(1, len(UDVS_labs))

            old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
                                       np.zeros(tot_bins, dtype=INDICES_DTYPE)))

        if 'parm_mat' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from parameters text file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm = self._read_parms_mat(path_dict['parm_mat'], isBEPS)
        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from old mat text file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec = self._read_old_mat_be_vecs(path_dict['old_mat_parms'], verbose=verbose)
        else:
            warn('No secondary parameters file (.mat) provided. Generating '
                 'dummy BE arrays')
            band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
            st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
            en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
            bin_freqs = np.linspace(st_f, en_f, bins_per_step, dtype=np.float32)

            if verbose:
                print('Number of bins: {}'.format(bins_per_step))
            bin_inds = np.zeros(shape=bins_per_step, dtype=np.int32)
            bin_FFT = np.zeros(shape=bins_per_step, dtype=np.complex64)
            ex_wfm = np.zeros(shape=bins_per_step, dtype=np.float32)

        # Forcing standardized datatypes:
        bin_inds = np.int32(bin_inds)
        bin_freqs = np.float32(bin_freqs)
        bin_FFT = np.complex64(bin_FFT)
        ex_wfm = np.float32(ex_wfm)

        self.FFT_BE_wave = bin_FFT

        # legacy parameters inserted for BEAM
        parm_dict['num_bins'] = tot_bins
        parm_dict['num_pix'] = num_pix
        parm_dict['num_udvs_steps'] = num_actual_udvs_steps
        parm_dict['num_steps'] = num_actual_udvs_steps

        if self._verbose:
            print('\tPreparing UDVS slices for region references')
        udvs_slices = dict()
        for col_ind, col_name in enumerate(UDVS_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))

        # Need to add the Bin Waveform type - infer from UDVS
        exec_bin_vec = self.signal_type * np.ones(len(bin_inds), dtype=np.int32)

        if self.expt_type == 2:
            if self._verbose:
                print('\tExperiment type = 2. Doubling BE vectors')
            exec_bin_vec = np.hstack((exec_bin_vec, -1 * exec_bin_vec))
            bin_inds = np.hstack((bin_inds, bin_inds))
            bin_freqs = np.hstack((bin_freqs, bin_freqs))
            # This is wrong but I don't know what else to do
            bin_FFT = np.hstack((bin_FFT, bin_FFT))

        # Create Spectroscopic Values and Spectroscopic Values Labels datasets
        # This is an old and legacy way of doing things. Ideally, all we would need to do is just get the unit values
        if self._verbose:
            print('\tCalculating spectroscopic values')
        ret_vals = createSpecVals(UDVS_mat, old_spec_inds, bin_freqs,
                                  exec_bin_vec, parm_dict, UDVS_labs,
                                  UDVS_units, verbose=verbose)
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = ret_vals

        if self._verbose:
            print('\t\tspec_vals_labs: {}'.format(spec_vals_labs))
            unit_vals = get_unit_values(spec_inds, spec_vals,
                                        all_dim_names=spec_vals_labs,
                                        is_spec=True, verbose=False)
            print('\tUnit spectroscopic values')
            for key, val in unit_vals.items():
                print('\t\t{} : length: {}, values:\n\t\t\t{}'.format(key, len(val), val))

        if spec_inds.shape[1] != tot_bins:
            raise ValueError('Second axis of spectroscopic indices: {} not '
                             'matching with second axis of the expected main '
                             'dataset: {}'.format(spec_inds.shape, tot_bins))

        # Not sure what is happening here but this should work.
        spec_dim_dict = dict()
        for entry in spec_vals_labs_names:
            spec_dim_dict[entry[0] + '_parameters'] = entry[1]

        spec_vals_slices = dict()

        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind, row_ind + 1), slice(None))

        if path.exists(h5_path):
            if self._verbose:
                print('\tRemoving existing / old translated file: ' + h5_path)
            remove(h5_path)

        # First create the file
        h5_f = h5py.File(h5_path, mode='w')

        # Then write root level attributes
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        try:
            global_parms['experiment_date'] = parm_dict['File_date_and_time']
        except KeyError:
            global_parms['experiment_date'] = '1:1:1'

        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'ODF'
        if self._verbose:
            print('\tWriting attributes to HDF5 file root')
        write_simple_attrs(h5_f, global_parms)
        write_book_keeping_attrs(h5_f)

        # Then create the measurement group
        h5_meas_group = create_indexed_group(h5_f, 'Measurement')

        # Write attributes at the measurement group level
        if self._verbose:
            print('\twriting attributes to Measurement group')
        write_simple_attrs(h5_meas_group, parm_dict)

        # Create the Channel group
        h5_chan_grp = create_indexed_group(h5_meas_group, 'Channel')

        # Write channel group attributes
        write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1',
                                         'channel_type': 'BE'})

        # Now the datasets!
        if self._verbose:
            print('\tCreating ancillary datasets')
        h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)

        h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
        # TODO: Avoid using region references in USID
        write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=self._verbose)
        write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=False)

        h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])

        h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
                                   dtype=INDICES_DTYPE)

        h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
        h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
        h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)

        if self._verbose:
            print('\tWriting Position datasets')

        pos_dims = [Dimension('X', 'm', np.arange(num_cols)),
                    Dimension('Y', 'm', np.arange(num_rows))]
        h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=self._verbose)
        if self._verbose:
            print('\tPosition datasets of shape: {}'.format(h5_pos_ind.shape))

        if self._verbose:
            print('\tWriting Spectroscopic datasets of shape: {}'.format(spec_inds.shape))
        h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=INDICES_DTYPE)
        h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=np.array(spec_vals), dtype=VALUES_DTYPE)
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_region_references(dset, spec_vals_slices, add_labels_attr=True, verbose=self._verbose)
            write_simple_attrs(dset, {'units': spec_vals_units}, verbose=False)
            write_simple_attrs(dset, spec_dim_dict)

        # Noise floor should be of shape: (udvs_steps x 3 x positions)
        if self._verbose:
            print('\tWriting noise floor dataset')
        h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
                                   chunks=(1, num_actual_udvs_steps))

        """
        New Method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares
        of UDVS steps by pixels.  N is determined dynamically based on the dimensions of the
        dataset.  Currently it is set such that individual chunks are less than 10kB in size.

        Chris Smith -- csmith55@utk.edu
        """
        BEPS_chunks = calc_chunks([num_pix, tot_bins],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, bins_per_step))
        if self._verbose:
            print('\tHDF5 dataset will have chunks of size: {}'.format(BEPS_chunks))
            print('\tCreating empty main dataset of shape: ({}, {})'.format(num_pix, tot_bins))

        self.h5_raw = write_main_dataset(h5_chan_grp, (num_pix, tot_bins), 'Raw_Data', 'Piezoresponse', 'V', None, None,
                                         dtype=np.complex64, chunks=BEPS_chunks, compression='gzip',
                                         h5_pos_inds=h5_pos_ind, h5_pos_vals=h5_pos_val, h5_spec_inds=h5_spec_inds,
                                         h5_spec_vals=h5_spec_vals, verbose=self._verbose)

        if self._verbose:
            print('\tReading data from binary data files into raw HDF5')
        self._read_data(UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                        add_pix)

        if self._verbose:
            print('\tGenerating plot groups')
        generatePlotGroups(self.h5_raw, self.mean_resp, folder_path, basename,
                           self.max_resp, self.min_resp, max_mem_mb=self.max_ram,
                           spec_label=spec_label, show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=self._verbose)
        if self._verbose:
            print('\tUpgrading to USIDataset')
        self.h5_raw = USIDataset(self.h5_raw)

        # Go ahead and read the current data in the second (current) channel
        if current_data_exists:  # If a .dat file matches
            if self._verbose:
                print('\tReading data in secondary channels (current)')
            self._read_secondary_channel(h5_meas_group, aux_files)

        if self._verbose:
            print('\tClosing HDF5 file')
        h5_f.close()

        return h5_path

    def _read_data(self, UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                   add_pix):
        """
        Checks if the data is BEPS or BELine and calls the correct function to read the data from
        file

        Parameters
        ----------
        UDVS_mat : numpy.ndarray of float
            UDVS table
        parm_dict : dict
            Experimental parameters
        path_dict : dict
            Dictionary of data files to be read
        real_size : int
            Size of the real data file in bytes
        isBEPS : boolean
            Is the data BEPS
        add_pix : boolean
            Does the reader need to add extra pixels to the end of the dataset

        Returns
        -------
        None
        """
        # Now read the raw data files:
        if not isBEPS:
            # Do this for all BE-Line (always small enough to read in one shot)
            if self._verbose:
                print('\t\tReading all raw data for BE-Line in one shot')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'out-of-field':
            # Do this for out-of-field BEPS ONLY that is also small (256 MB)
            if self._verbose:
                print('\t\tReading all raw BEPS (out-of-field) data at once')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'in-field':
            # Do this for in-field only
            if self._verbose:
                print('\t\tReading all raw BEPS (in-field only) data at once')
            self._quick_read_data(path_dict['write_real'],
                                  path_dict['write_imag'],
                                  parm_dict['num_udvs_steps'])
        else:
            # Large BEPS datasets OR those with in-and-out of field
            if self._verbose:
                print('\t\tReading all raw data for in-and-out-of-field OR '
                      'very large file one pixel at a time')
            self._read_beps_data(path_dict, UDVS_mat.shape[0],
                                 parm_dict['VS_measure_in_field_loops'],
                                 add_pix)
        self.h5_raw.file.flush()

    def _read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
        """
        Reads the imaginary and real data files pixelwise and writes to the H5 file

        Parameters
        ----------
        path_dict : dictionary
            Dictionary containing the absolute paths of the real and imaginary data files
        udvs_steps : unsigned int
            Number of UDVS steps
        mode : String / Unicode
            'in-field', 'out-of-field', or 'in and out-of-field'
        add_pixel : boolean. (Optional; default is False)
            If an empty pixel worth of data should be written to the end

        Returns
        -------
        None
        """

        print('---- reading pixel-by-pixel ----------')

        bytes_per_pix = self.h5_raw.shape[1] * 4
        step_size = self.h5_raw.shape[1] / udvs_steps

        if mode == 'out-of-field':
            parsers = [BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in-field':
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in and out-of-field':
            # each file will only have half the udvs steps:
            if 0.5 * udvs_steps % 1:
                raise ValueError('Odd number of UDVS')

            udvs_steps = int(0.5 * udvs_steps)
            # be careful - each pair contains only half the necessary bins - so read half
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2)),
                       BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2))]

            if step_size % 1:
                raise ValueError('strange number of bins per UDVS step. Exiting')

            step_size = int(step_size)

        rand_spectra = self._get_random_spectra(parsers, self.h5_raw.shape[0], udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)

        self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
        self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
        self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)

        numpix = self.h5_raw.shape[0]
        """
        Don't try to do the last step if a pixel is missing.
        This will be handled after the loop.
        """
        if add_pixel:
            numpix -= 1

        for pix_indx in range(numpix):
            if self.h5_raw.shape[0] > 5:
                if pix_indx % int(round(self.h5_raw.shape[0] / 10)) == 0:
                    print('Reading... {}% complete'.format(round(100 * pix_indx / self.h5_raw.shape[0])))

            # get the raw stream from each parser
            pxl_data = list()
            for prsr in parsers:
                pxl_data.append(prsr.read_pixel())

            # interleave if both in and out of field
            # we are ignoring user defined possibilities...
            if mode == 'in and out-of-field':
                in_fld = pxl_data[0]
                out_fld = pxl_data[1]

                in_fld_2 = in_fld.reshape(udvs_steps, step_size)
                out_fld_2 = out_fld.reshape(udvs_steps, step_size)
                raw_mat = np.empty((udvs_steps * 2, step_size), dtype=out_fld.dtype)
                raw_mat[0::2, :] = in_fld_2
                raw_mat[1::2, :] = out_fld_2
                raw_vec = raw_mat.reshape(in_fld.size + out_fld.size).transpose()
            else:
                raw_vec = pxl_data[0]  # only one parser
            self.max_resp[pix_indx] = np.max(np.abs(raw_vec))
            self.min_resp[pix_indx] = np.min(np.abs(raw_vec))
            # Running (incremental) average of the response across pixels
            self.mean_resp = (1 / (pix_indx + 1)) * (raw_vec + pix_indx * self.mean_resp)

            if take_conjugate:
                raw_vec = np.conjugate(raw_vec)
            self.h5_raw[pix_indx, :] = np.complex64(raw_vec[:])
            self.h5_raw.file.flush()

        # Add zeros to main_data for the missing pixel.
        if add_pixel:
            self.h5_raw[-1, :] = 0 + 0j

        print('---- Finished reading files -----')

    def _quick_read_data(self, real_path, imag_path, udvs_steps):
        """
        Reads the real and imaginary data files in one shot and writes their
        contents to the main (Raw_Data) HDF5 dataset

        Parameters
        -----------
        real_path : String / Unicode
            Absolute file path of the real data file
        imag_path : String / Unicode
            Absolute file path of the imaginary data file
        udvs_steps : unsigned int
            Number of UDVS steps
        """
        parser = BEodfParser(real_path, imag_path, self.h5_raw.shape[0],
                             self.h5_raw.shape[1] * 4)

        step_size = self.h5_raw.shape[1] / udvs_steps
        rand_spectra = self._get_random_spectra([parser],
                                                self.h5_raw.shape[0],
                                                udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra,
                                                verbose=self._verbose)
        if self._verbose:
            print('\t\t\tChecking if conjugate is required')
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        raw_vec = parser.read_all_data()
        if take_conjugate:
            if self._verbose:
                print('\t' * 4 + 'Taking conjugate for positive quality factors')
            raw_vec = np.conjugate(raw_vec)

        if raw_vec.shape[0] != np.prod(self.h5_raw.shape):
            percentage_padded = 100 * (np.prod(self.h5_raw.shape) - raw_vec.shape[0]) / np.prod(self.h5_raw.shape)
            warn('Warning! Raw data length {} is not matching placeholder length {}. '
                 'Padding zeros for {}% of the data!'.format(raw_vec.shape[0], np.prod(self.h5_raw.shape), percentage_padded))

            padded_raw_vec = np.zeros(np.prod(self.h5_raw.shape), dtype=np.complex64)
            padded_raw_vec[:raw_vec.shape[0]] = raw_vec
            raw_mat = padded_raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])
        else:
            raw_mat = raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])

        # Write to the h5 dataset:
        self.mean_resp = np.mean(raw_mat, axis=0)
        self.max_resp = np.amax(np.abs(raw_mat), axis=0)
        self.min_resp = np.amin(np.abs(raw_mat), axis=0)
        self.h5_raw[:, :] = np.complex64(raw_mat)
        self.h5_raw.file.flush()

        print('---- Finished reading files -----')

    @staticmethod
    def _parse_file_path(data_filepath):
        """
        Returns the basename and a dictionary containing the absolute file paths of the
        real and imaginary data files as well as the text and .mat parameter files

        Parameters
        ----------
        data_filepath: String / Unicode
            Absolute path of any file in the same directory as the .dat files

        Returns
        -------
        basename : String / Unicode
            Basename of the dataset
        path_dict : Dictionary
            Dictionary containing absolute paths of all necessary data and parameter files
        """
        (folder_path, basename) = path.split(data_filepath)
        (super_folder, basename) = path.split(folder_path)
        if basename.endswith('_d') or basename.endswith('_c'):
            # Old old data format where the folder ended with a _d or _c to denote a completed spectroscopic run
            basename = basename[:-2]
        """
        A single pair of real and imaginary files are / were generated for:
            BE-Line and BEPS (compiled version only generated out-of-field or 'read')
        Two pairs of real and imaginary files were generated for later BEPS datasets
            These have 'read' and 'write' prefixes to denote out or in field respectively
        """
        path_dict = dict()
        for file_name in listdir(folder_path):
            abs_path = path.join(folder_path, file_name)
            if file_name.endswith('.txt') and file_name.find('parm') > 0:
                path_dict['parm_txt'] = abs_path
            elif file_name.find('.mat') > 0:
                if file_name.find('more_parms') > 0:
                    path_dict['parm_mat'] = abs_path
                elif file_name == (basename + '.mat'):
                    path_dict['old_mat_parms'] = abs_path
            elif file_name.endswith('.dat'):
                # Need to account for the second AI channel here
                file_tag = 'read'
                if file_name.find('write') > 0:
                    file_tag = 'write'
                if file_name.find('real') > 0:
                    file_tag += '_real'
                elif file_name.find('imag') > 0:
                    file_tag += '_imag'
                path_dict[file_tag] = abs_path

        return basename, path_dict
    def _read_secondary_channel(self, h5_meas_group, aux_file_path):
        """
        Reads secondary channel stored in AI .mat file
        Currently works for in-field measurements only, but should be updated to
        include both in and out of field measurements

        Parameters
        -----------
        h5_meas_group : h5 group
            Reference to the Measurement group
        aux_file_path : String / Unicode
            Absolute file path of the secondary channel file.
        """
        if self._verbose:
            print('\t---------- Reading Secondary Channel  ----------')
        if isinstance(aux_file_path, (list, tuple)):
            aux_file_paths = aux_file_path
        else:
            aux_file_paths = list(aux_file_path)

        is_in_out_field = 'Field' in self.h5_raw.spec_dim_labels

        if not is_in_out_field and len(aux_file_paths) > 1:
            # TODO: Find a better way to handle this
            warn('\t\tField was not varied but found more than one file for '
                 'secondary channel: {}.\n\t\tResults will be overwritten'
                 ''.format([path.split(item)[-1] for item in aux_file_paths]))
        elif is_in_out_field and len(aux_file_paths) == 1:
            warn('\t\tField was varied but only one data file for secondary '
                 'channel was found. Half the data will be zeros')

        spectral_len = 1
        for dim_name, dim_size in zip(self.h5_raw.spec_dim_labels,
                                      self.h5_raw.spec_dim_sizes):
            if dim_name == 'Frequency':
                continue
            spectral_len = spectral_len * dim_size

        num_pix = self.h5_raw.shape[0]
        if self._verbose:
            print('\t\tExpecting this channel to be of shape: ({}, {})'
                  ''.format(num_pix, spectral_len))
            print('\t\tis_in_out_field: {}'.format(is_in_out_field))

        # create a new channel
        h5_current_channel_group = create_indexed_group(h5_meas_group,
                                                        'Channel')

        # Copy attributes from the main channel
        copy_attributes(