# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:12 2015

@author: Suhas Somnath, Stephen Jesse
"""

from __future__ import division, print_function, absolute_import, unicode_literals

from os import path, listdir, remove
import sys
import datetime
from warnings import warn
import h5py
import numpy as np
from scipy.io.matlab import loadmat  # To load parameters stored in Matlab .mat file

from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, \
    createSpecVals, requires_conjugate, generate_bipolar_triangular_waveform, \
    infer_bipolar_triangular_fraction_phase, nf32
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import INDICES_DTYPE, VALUES_DTYPE, Dimension, calc_chunks
from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, write_region_references, \
    create_indexed_group, write_simple_attrs, write_book_keeping_attrs, copy_attributes,\
    write_reduced_anc_dsets, get_unit_values
from pyUSID.io.usi_data import USIDataset
from pyUSID.processing.comp_utils import get_available_memory


if sys.version_info.major == 3:
    unicode = str


class BEodfTranslator(Translator):
    """
    Translates either Band Excitation (BE) scan or Band Excitation
    Polarization Switching (BEPS) data from the older data format(s) to a .h5 file
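
    Examples
    --------
    Illustrative sketch only; the file path below is a placeholder rather than
    a file that ships with this package::

        translator = BEodfTranslator()
        parm_path = '/path/to/experiment_parm.txt'
        if BEodfTranslator.is_valid_file(parm_path) is not None:
            h5_path = translator.translate(parm_path, verbose=False)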
    """
Unknown's avatar
Unknown committed
38

Chris Smith's avatar
Chris Smith committed
39
40
41
    def __init__(self, *args, **kwargs):
        super(BEodfTranslator, self).__init__(*args, **kwargs)
        self.h5_raw = None
        self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
        self._cores = kwargs.pop('cores', None)
        self.FFT_BE_wave = None
        self.signal_type = None
        self.expt_type = None
        self._verbose = False

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to raw data file

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """
        if not isinstance(data_path, (str, unicode)):
            raise TypeError('data_path must be a string')

        ndf = 'newdataformat'

        data_path = path.abspath(data_path)

        if path.isfile(data_path):
            ext = data_path.split('.')[-1]
            if ext.lower() not in ['jpg', 'png', 'jpeg', 'tiff', 'mat', 'txt',
                                   'dat', 'xls', 'xlsx']:
                return None
            # we only care about the folder names at this point...
            data_path, _ = path.split(data_path)

        # Check if the data is in the new or old format:
        # Check one level up:
        _, dir_name = path.split(data_path)
        if dir_name == ndf:
            # Though this translator could also read these files, the NDF translator is more robust...
            return None
        # Check one level down:
        if ndf in listdir(data_path):
            # Though this translator could also read these files, the NDF translator is more robust...
            return None

        file_path = path.join(data_path, listdir(path=data_path)[0])

        _, path_dict = BEodfTranslator._parse_file_path(file_path)

        if any([x.find('bigtime_0') > 0 and x.endswith('.dat') for x in
                path_dict.values()]):
            # This is a G-mode Line experiment:
            return None

        parm_found = any([piece in path_dict.keys() for piece in
                          ['parm_txt', 'old_mat_parms']])
        real_found = any([piece in path_dict.keys() for piece in
                          ['read_real', 'write_real']])
        imag_found = any([piece in path_dict.keys() for piece in
                          ['read_imag', 'write_imag']])

        if parm_found and real_found and imag_found:
            if 'parm_txt' in path_dict.keys():
                return path_dict['parm_txt']
            else:
                return path_dict['old_mat_parms']
        else:
            return None

    def translate(self, file_path, show_plots=True, save_plots=True,
                  do_histogram=False, verbose=False):
        """
        Translates .dat data file(s) to a single .h5 file
        
        Parameters
        -------------
        file_path : String / Unicode
            Absolute file path for one of the data files. 
            It is assumed that this file is of the OLD data format.
        show_plots : (optional) Boolean
            Whether or not to show intermediate plots
        save_plots : (optional) Boolean
            Whether or not to save plots to disk
        do_histogram : (optional) Boolean
            Whether or not to construct histograms to visualize data quality. Note - this takes a fair amount of time
        verbose : (optional) Boolean
            Whether or not to print statements
            
        Returns
        ----------
        h5_path : String / Unicode
            Absolute path of the resultant .h5 file
        """
        self._verbose = verbose

        file_path = path.abspath(file_path)
        (folder_path, basename) = path.split(file_path)
        (basename, path_dict) = self._parse_file_path(file_path)

        h5_path = path.join(folder_path, basename + '.h5')
        tot_bins_multiplier = 1
        udvs_denom = 2

        if 'parm_txt' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from text file')
            isBEPS, parm_dict = parmsToDict(path_dict['parm_txt'])

            # Initial text files named some parameters differently
            updated_parms = False
            if parm_dict['VS_mode'] == 'AC modulation mode':
                updated_parms = True
                parm_dict['VS_mode'] = 'AC modulation mode with time reversal'
            if parm_dict['BE_phase_content'] == 'chirp':
                updated_parms = True
                parm_dict['BE_phase_content'] = 'chirp-sinc hybrid'
            if updated_parms:
                warn('Parameters were stored in text file with an older format. '
                     'Values for one or more parameters were updated')

        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from old mat file')
            parm_dict = self._get_parms_from_old_mat(path_dict['old_mat_parms'], verbose=self._verbose)
            if parm_dict['VS_steps_per_full_cycle'] == 0:
                isBEPS = False
            else:
                isBEPS = True
        else:
            raise FileNotFoundError('No parameters file found! Cannot translate this dataset!')

        if self._verbose:
            keys = list(parm_dict.keys())
            keys.sort()
            print('\tExperiment parameters:')
            for key in keys:
                print('\t\t{} : {}'.format(key, parm_dict[key]))

            print('\n\tisBEPS = {}'.format(isBEPS))

        ignored_plt_grps = []
        if isBEPS:
            parm_dict['data_type'] = 'BEPSData'

            field_mode = parm_dict['VS_measure_in_field_loops']
            std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'

            if not std_expt:
                raise ValueError('This translator does not handle user defined voltage spectroscopy')

            spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])

            if parm_dict['VS_mode'] in ['DC modulation mode', 'current mode']:
                if field_mode == 'in and out-of-field':
                    tot_bins_multiplier = 2
                    udvs_denom = 1
                else:
                    if field_mode == 'out-of-field':
                        ignored_plt_grps = ['in-field']
                    else:
                        ignored_plt_grps = ['out-of-field']
            else:
                tot_bins_multiplier = 1
                udvs_denom = 1

        else:
            spec_label = 'None'
            parm_dict['data_type'] = 'BELineData'

        # Check file sizes:
        if self._verbose:
            print('\tChecking sizes of real and imaginary data files')

        if 'read_real' in path_dict.keys():
            real_size = path.getsize(path_dict['read_real'])
            imag_size = path.getsize(path_dict['read_imag'])
        else:
            real_size = path.getsize(path_dict['write_real'])
            imag_size = path.getsize(path_dict['write_imag'])

        if real_size != imag_size:
            raise ValueError("Real and imaginary file sizes do not match!")

        if real_size == 0:
            raise ValueError('Real and imaginary files were empty')

        # Check here if a second channel for current is present
        # Look for the file containing the current data

        if self._verbose:
            print('\tLooking for secondary channels')
        file_names = listdir(folder_path)
        aux_files = []
        current_data_exists = False
        for fname in file_names:
            if 'AI2' in fname:
                if 'write' in fname:
                    current_file = path.join(folder_path, fname)
                    current_data_exists = True
                aux_files.append(path.join(folder_path, fname))

        add_pix = False
        num_rows = int(parm_dict['grid_num_rows'])
        num_cols = int(parm_dict['grid_num_cols'])
        if self._verbose:
            print('\tRows: {}, Cols: {}'.format(num_rows, num_cols))
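        # Each raw binary file stores one float32 (4 bytes) per bin per pixel,
        # so dividing the file size by (num_pix * 4) below yields the total
        # number of bins.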
        num_pix = num_rows * num_cols
        tot_bins = real_size / (num_pix * 4)
        # Check for case where only a single pixel is missing.
        if num_pix == 1:
            check_bins = real_size / (num_pix * 4)
        else:
            check_bins = real_size / ((num_pix - 1) * 4)

        if self._verbose:
            print('\tChecking bins: Total: {}, actual: {}'.format(tot_bins,
                                                                  check_bins))

        if tot_bins % 1 and check_bins % 1:
            raise ValueError('Aborting! Some parameter appears to have '
                             'changed in-between')
        elif not tot_bins % 1:
            # Everything's ok
            pass
        elif not check_bins % 1:
            tot_bins = check_bins
            warn('Warning:  A pixel seems to be missing from the data. '
                 'File will be padded with zeros.')
            add_pix = True

        tot_bins = int(tot_bins) * tot_bins_multiplier

        if 'parm_mat' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from parameters .mat file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm = self._read_parms_mat(path_dict['parm_mat'], isBEPS)
        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from old .mat parameters file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec = self._read_old_mat_be_vecs(path_dict['old_mat_parms'], verbose=verbose)
        else:
            if self._verbose:
                print('\tGenerating dummy BE arrays')
            band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
            st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
            en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
            bin_freqs = np.linspace(st_f, en_f, tot_bins, dtype=np.float32)

            warn('No parms .mat file found.... Filling dummy values into ancillary datasets.')
            bin_inds = np.zeros(shape=tot_bins, dtype=np.int32)
            bin_FFT = np.zeros(shape=tot_bins, dtype=np.complex64)
            ex_wfm = np.zeros(shape=100, dtype=np.float32)

        # Forcing standardized datatypes:
        bin_inds = np.int32(bin_inds)
        bin_freqs = np.float32(bin_freqs)
        bin_FFT = np.complex64(bin_FFT)
        ex_wfm = np.float32(ex_wfm)

        self.FFT_BE_wave = bin_FFT

        if isBEPS:
            if self._verbose:
                print('\tBuilding UDVS table for BEPS')
            UDVS_labs, UDVS_units, UDVS_mat = self._build_udvs_table(parm_dict)

            if self._verbose:
                print('\tTrimming UDVS table to remove unused plot group columns')

            UDVS_mat, UDVS_labs, UDVS_units = trimUDVS(UDVS_mat, UDVS_labs, UDVS_units, ignored_plt_grps)

            old_spec_inds = np.zeros(shape=(2, tot_bins), dtype=INDICES_DTYPE)

            # Will assume that all excitation waveforms have same num of bins
            num_actual_udvs_steps = UDVS_mat.shape[0] / udvs_denom
            bins_per_step = tot_bins / num_actual_udvs_steps
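            # Hypothetical example (numbers purely for illustration): a UDVS
            # table with 128 rows and udvs_denom = 2 would give 64 actual
            # steps, and 10240 total bins would then be 160 bins per step.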
            if self._verbose:
                print('\t# UDVS steps: {}, # bins/step: {}'
                      ''.format(num_actual_udvs_steps, bins_per_step))

            if bins_per_step % 1:
                print('UDVS mat shape: {}, total bins: {}, bins per step: {}'.format(UDVS_mat.shape, tot_bins,
                                                                                     bins_per_step))
                raise ValueError('Non integer number of bins per step!')

            bins_per_step = int(bins_per_step)
            num_actual_udvs_steps = int(num_actual_udvs_steps)

            stind = 0
            for step_index in range(UDVS_mat.shape[0]):
                if UDVS_mat[step_index, 2] < 1E-3:  # invalid AC amplitude
                    continue
                # Bin step
                old_spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=INDICES_DTYPE)
                # UDVS step
                old_spec_inds[1, stind:stind + bins_per_step] = step_index * np.ones(bins_per_step, dtype=INDICES_DTYPE)
                stind += bins_per_step
            del stind, step_index

        else:  # BE Line
            if self._verbose:
                print('\tPreparing supporting variables since this is BE-Line')
            self.signal_type = 1
            self.expt_type = 1  # Stephen has not used this index for some reason
            num_actual_udvs_steps = 1
            bins_per_step = tot_bins
            UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
            UDVS_units = ['', 'V', 'A', '', '', '']
            UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
                                dtype=np.float32).reshape(1, len(UDVS_labs))

            old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
                                       np.zeros(tot_bins, dtype=INDICES_DTYPE)))

        # legacy parameters inserted for BEAM
        parm_dict['num_bins'] = tot_bins
        parm_dict['num_pix'] = num_pix
        parm_dict['num_udvs_steps'] = num_actual_udvs_steps
        parm_dict['num_steps'] = num_actual_udvs_steps

        if self._verbose:
            print('\tPreparing UDVS slices for region references')
        udvs_slices = dict()
        for col_ind, col_name in enumerate(UDVS_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))

        # Need to add the Bin Waveform type - infer from UDVS
        exec_bin_vec = self.signal_type * np.ones(len(bin_inds), dtype=np.int32)

        if self.expt_type == 2:
            if self._verbose:
                print('\tExperiment type = 2. Doubling BE vectors')
            exec_bin_vec = np.hstack((exec_bin_vec, -1 * exec_bin_vec))
            bin_inds = np.hstack((bin_inds, bin_inds))
            bin_freqs = np.hstack((bin_freqs, bin_freqs))
            # This is wrong but I don't know what else to do
            bin_FFT = np.hstack((bin_FFT, bin_FFT))

        # Create Spectroscopic Values and Spectroscopic Values Labels datasets
        # This is an old and legacy way of doing things. Ideally, all we would need to do is just get the unit values
        if self._verbose:
            print('\tCalculating spectroscopic values')
        ret_vals = createSpecVals(UDVS_mat, old_spec_inds, bin_freqs,
                                  exec_bin_vec, parm_dict, UDVS_labs,
                                  UDVS_units, verbose=verbose)
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = ret_vals

        if self._verbose:
            print('\t\tspec_vals_labs: {}'.format(spec_vals_labs))
            unit_vals = get_unit_values(spec_inds, spec_vals,
                                        all_dim_names=spec_vals_labs,
                                        is_spec=True, verbose=False)
            print('\tUnit spectroscopic values')
            for key, val in unit_vals.items():
                print('\t\t{} : length: {}, values:\n\t\t\t{}'.format(key, len(val), val))

        if spec_inds.shape[1] != tot_bins:
            raise ValueError('Second axis of spectroscopic indices: {} not '
                             'matching with second axis of the expected main '
                             'dataset: {}'.format(spec_inds.shape, tot_bins))

        # Not sure what is happening here but this should work.
        spec_dim_dict = dict()
        for entry in spec_vals_labs_names:
            spec_dim_dict[entry[0] + '_parameters'] = entry[1]

        spec_vals_slices = dict()

        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind, row_ind + 1), slice(None))

        if path.exists(h5_path):
            if self._verbose:
                print('\tRemoving existing / old translated file: ' + h5_path)
            remove(h5_path)

        # First create the file
        h5_f = h5py.File(h5_path, mode='w')

        # Then write root level attributes
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        try:
            global_parms['experiment_date'] = parm_dict['File_date_and_time']
        except KeyError:
            global_parms['experiment_date'] = '1:1:1'

        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'ODF'
        if self._verbose:
            print('\tWriting attributes to HDF5 file root')
        write_simple_attrs(h5_f, global_parms)
        write_book_keeping_attrs(h5_f)

        # Then create the measurement group
        h5_meas_group = create_indexed_group(h5_f, 'Measurement')

        # Write attributes at the measurement group level
        if self._verbose:
            print('\twriting attributes to Measurement group')
        write_simple_attrs(h5_meas_group, parm_dict)

        # Create the Channel group
        h5_chan_grp = create_indexed_group(h5_meas_group, 'Channel')

        # Write channel group attributes
        write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1',
                                         'channel_type': 'BE'})

        # Now the datasets!
        if self._verbose:
            print('\tCreating ancillary datasets')
        h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)

        h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
        # TODO: Avoid using region references in USID
        write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=self._verbose)
        write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=False)

        h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])

        h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
                                   dtype=INDICES_DTYPE)

        h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
        h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
        h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)

        if self._verbose:
            print('\tWriting Position datasets')

        pos_dims = [Dimension('X', 'm', np.arange(num_cols)),
                    Dimension('Y', 'm', np.arange(num_rows))]
        h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=self._verbose)
        if self._verbose:
            print('\tPosition datasets of shape: {}'.format(h5_pos_ind.shape))

        if self._verbose:
            print('\tWriting Spectroscopic datasets of shape: {}'.format(spec_inds.shape))
        h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=INDICES_DTYPE)
        h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=np.array(spec_vals), dtype=VALUES_DTYPE)
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_region_references(dset, spec_vals_slices, add_labels_attr=True, verbose=self._verbose)
            write_simple_attrs(dset, {'units': spec_vals_units}, verbose=False)
            write_simple_attrs(dset, spec_dim_dict)

        # Noise floor should be of shape: (udvs_steps x 3 x positions)
        if self._verbose:
            print('\tWriting noise floor dataset')
        h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
                                   chunks=(1, num_actual_udvs_steps))

        """
        New Method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares
        of UDVS steps by pixels.  N is determined dynamically based on the dimensions of the
        dataset.  Currently it is set such that individual chunks are less than 10kB in size.

        Chris Smith -- csmith55@utk.edu
        """
        BEPS_chunks = calc_chunks([num_pix, tot_bins],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, bins_per_step))
        if self._verbose:
            print('\tHDF5 dataset will have chunks of size: {}'.format(BEPS_chunks))
            print('\tCreating empty main dataset of shape: ({}, {})'.format(num_pix, tot_bins))
        self.h5_raw = write_main_dataset(h5_chan_grp, (num_pix, tot_bins), 'Raw_Data', 'Piezoresponse', 'V', None, None,
                                         dtype=np.complex64, chunks=BEPS_chunks, compression='gzip',
                                         h5_pos_inds=h5_pos_ind, h5_pos_vals=h5_pos_val, h5_spec_inds=h5_spec_inds,
                                         h5_spec_vals=h5_spec_vals, verbose=self._verbose)

        if self._verbose:
            print('\tReading data from binary data files into raw HDF5')
        self._read_data(UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                        add_pix)

        if self._verbose:
            print('\tGenerating plot groups')
        generatePlotGroups(self.h5_raw, self.mean_resp, folder_path, basename,
                           self.max_resp, self.min_resp, max_mem_mb=self.max_ram,
                           spec_label=spec_label, show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=self._verbose)
        if self._verbose:
            print('\tUpgrading to USIDataset')
        self.h5_raw = USIDataset(self.h5_raw)

        # Go ahead and read the current data in the second (current) channel
        if current_data_exists:  # If a .dat file matches
            if self._verbose:
                print('\tReading data in secondary channels (current)')
            self._read_secondary_channel(h5_meas_group, aux_files)

        if self._verbose:
            print('\tClosing HDF5 file')
        h5_f.close()

        return h5_path

    def _read_data(self, UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                   add_pix):
        """
        Checks if the data is BEPS or BELine and calls the correct function to read the data from
        file

        Parameters
        ----------
        UDVS_mat : numpy.ndarray of float
            UDVS table
        parm_dict : dict
            Experimental parameters
        path_dict : dict
            Dictionary of data files to be read
        real_size : int
            Size of the real data file in bytes
        isBEPS : boolean
            Is the data BEPS
        add_pix : boolean
            Does the reader need to add extra pixels to the end of the dataset

        Returns
        -------
        None
        """
        # Now read the raw data files:
        if not isBEPS:
            # Do this for all BE-Line (always small enough to read in one shot)
            if self._verbose:
                print('\t\tReading all raw data for BE-Line in one shot')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'out-of-field':
            # Do this for out-of-field BEPS ONLY that is also small (256 MB)
            if self._verbose:
                print('\t\tReading all raw BEPS (out-of-field) data at once')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'in-field':
            # Do this for in-field only
            if self._verbose:
                print('\t\tReading all raw BEPS (in-field only) data at once')
            self._quick_read_data(path_dict['write_real'],
                                  path_dict['write_imag'],
                                  parm_dict['num_udvs_steps'])
        else:
            # Large BEPS datasets OR those with in-and-out of field
            if self._verbose:
                print('\t\tReading all raw data for in-and-out-of-field OR '
                      'very large file one pixel at a time')
            self._read_beps_data(path_dict, UDVS_mat.shape[0],
                                 parm_dict['VS_measure_in_field_loops'],
                                 add_pix)
        self.h5_raw.file.flush()

    def _read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
        """
        Reads the imaginary and real data files pixelwise and writes to the H5 file 
        
        Parameters 
        --------------------
        path_dict : dictionary
            Dictionary containing the absolute paths of the real and imaginary data files
        udvs_steps : unsigned int
            Number of UDVS steps
        mode : String / Unicode
            'in-field', 'out-of-field', or 'in and out-of-field'
        add_pixel : boolean. (Optional; default is False)
            If an empty pixel worth of data should be written to the end             
        
        Returns 
        -------------------- 
        None
        """

        print('---- reading pixel-by-pixel ----------')

        bytes_per_pix = self.h5_raw.shape[1] * 4
        step_size = self.h5_raw.shape[1] / udvs_steps

        if mode == 'out-of-field':
            parsers = [BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in-field':
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in and out-of-field':
            # each file will only have half the udvs steps:
            if 0.5 * udvs_steps % 1:
                raise ValueError('Odd number of UDVS')

            udvs_steps = int(0.5 * udvs_steps)
            # be careful - each pair contains only half the necessary bins - so read half
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2)),
                       BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2))]

            if step_size % 1:
                raise ValueError('strange number of bins per UDVS step. Exiting')

            step_size = int(step_size)

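        # Sample a random subset of spectra to decide whether the raw data must
        # be conjugated so that the fitted quality factors come out positive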
        rand_spectra = self._get_random_spectra(parsers, self.h5_raw.shape[0], udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)

        self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
        self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
        self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)

        numpix = self.h5_raw.shape[0]
        """
        Don't try to do the last step if a pixel is missing.
        This will be handled after the loop.
        """
        if add_pixel:
            numpix -= 1

        for pix_indx in range(numpix):
            if self.h5_raw.shape[0] > 5:
                if pix_indx % int(round(self.h5_raw.shape[0] / 10)) == 0:
                    print('Reading... {}% complete'.format(round(100 * pix_indx / self.h5_raw.shape[0])))

            # get the raw stream from each parser
            pxl_data = list()
            for prsr in parsers:
                pxl_data.append(prsr.read_pixel())

            # interleave if both in and out of field
            # we are ignoring user defined possibilities...
            if mode == 'in and out-of-field':
                in_fld = pxl_data[0]
                out_fld = pxl_data[1]

                in_fld_2 = in_fld.reshape(udvs_steps, step_size)
                out_fld_2 = out_fld.reshape(udvs_steps, step_size)
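                # Alternate rows so that each UDVS step contributes its
                # in-field spectrum immediately followed by its out-of-field one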
                raw_mat = np.empty((udvs_steps * 2, step_size), dtype=out_fld.dtype)
                raw_mat[0::2, :] = in_fld_2
                raw_mat[1::2, :] = out_fld_2
                raw_vec = raw_mat.reshape(in_fld.size + out_fld.size).transpose()
            else:
                raw_vec = pxl_data[0]  # only one parser
            self.max_resp[pix_indx] = np.max(np.abs(raw_vec))
            self.min_resp[pix_indx] = np.min(np.abs(raw_vec))
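            # Running (incremental) mean over the pixels read so far:
            # mean_n = (x_n + (n - 1) * mean_(n-1)) / n, with n = pix_indx + 1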
            self.mean_resp = (1 / (pix_indx + 1)) * (raw_vec + pix_indx * self.mean_resp)

            if take_conjugate:
                raw_vec = np.conjugate(raw_vec)
            self.h5_raw[pix_indx, :] = np.complex64(raw_vec[:])
            self.h5_raw.file.flush()

        # Add zeros to main_data for the missing pixel.
        if add_pixel:
            self.h5_raw[-1, :] = 0 + 0j

        print('---- Finished reading files -----')

    def _quick_read_data(self, real_path, imag_path, udvs_steps):
        """
        Reads the entire real and imaginary data files in one shot and writes
        the data to the HDF5 dataset

        Parameters
        -----------
        real_path : String / Unicode
            Absolute file path of the real data file
        imag_path : String / Unicode
            Absolute file path of the imaginary data file
        udvs_steps : unsigned int
            Number of UDVS steps
        """
        parser = BEodfParser(real_path, imag_path, self.h5_raw.shape[0],
                             self.h5_raw.shape[1] * 4)

        step_size = self.h5_raw.shape[1] / udvs_steps
        rand_spectra = self._get_random_spectra([parser],
                                                self.h5_raw.shape[0],
                                                udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra,
                                                verbose=self._verbose)
        if self._verbose:
            print('\t\t\tChecking if conjugate is required')
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        raw_vec = parser.read_all_data()
        if take_conjugate:
            if self._verbose:
                print('\t'*4 + 'Taking conjugate for positive quality factors')
            raw_vec = np.conjugate(raw_vec)

        if raw_vec.shape[0] != np.prod(self.h5_raw.shape):
            percentage_padded = 100 * (np.prod(self.h5_raw.shape) - raw_vec.shape[0]) / np.prod(self.h5_raw.shape)
            warn('Warning! Raw data length {} is not matching placeholder length {}. '
                 'Padding zeros for {}% of the data!'.format(raw_vec.shape[0], np.prod(self.h5_raw.shape), percentage_padded))

            padded_raw_vec = np.zeros(np.prod(self.h5_raw.shape), dtype=np.complex64)

            padded_raw_vec[:raw_vec.shape[0]] = raw_vec
            raw_mat = padded_raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])
        else:
            raw_mat = raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])

        # Write to the h5 dataset:
        self.mean_resp = np.mean(raw_mat, axis=0)
        self.max_resp = np.amax(np.abs(raw_mat), axis=0)
        self.min_resp = np.amin(np.abs(raw_mat), axis=0)
        self.h5_raw[:, :] = np.complex64(raw_mat)
        self.h5_raw.file.flush()

        print('---- Finished reading files -----')

    @staticmethod
    def _parse_file_path(data_filepath):
        """
        Returns the basename and a dictionary containing the absolute file paths for the
        real and imaginary data files, text and mat parameter files in a dictionary
        
        Parameters 
        --------------------
        data_filepath: String / Unicode
            Absolute path of any file in the same directory as the .dat files
        
        Returns 
        --------------------
        basename : String / Unicode
            Basename of the dataset      
        path_dict : Dictionary
            Dictionary containing absolute paths of all necessary data and parameter files
        """
        (folder_path, basename) = path.split(data_filepath)
        (super_folder, basename) = path.split(folder_path)

        if basename.endswith('_d') or basename.endswith('_c'):
            # Old old data format where the folder ended with a _d or _c to denote a completed spectroscopic run
            basename = basename[:-2]
        """
        A single pair of real and imaginary files are / were generated for:
            BE-Line and BEPS (compiled version only generated out-of-field or 'read')
        Two pairs of real and imaginary files were generated for later BEPS datasets
            These have 'read' and 'write' prefixes to denote out or in field respectively
        """
        path_dict = dict()

        for file_name in listdir(folder_path):
            abs_path = path.join(folder_path, file_name)
            if file_name.endswith('.txt') and file_name.find('parm') > 0:
                path_dict['parm_txt'] = abs_path
            elif file_name.find('.mat') > 0:
                if file_name.find('more_parms') > 0:
                    path_dict['parm_mat'] = abs_path
                elif file_name == (basename + '.mat'):
                    path_dict['old_mat_parms'] = abs_path
            elif file_name.endswith('.dat'):
                # Need to account for the second AI channel here
                file_tag = 'read'
                if file_name.find('write') > 0:
                    file_tag = 'write'
                if file_name.find('real') > 0:
                    file_tag += '_real'
                elif file_name.find('imag') > 0:
                    file_tag += '_imag'
                path_dict[file_tag] = abs_path

        return basename, path_dict

    def _read_secondary_channel(self, h5_meas_group, aux_file_path):
        """
        Reads secondary channel stored in AI .mat file
        Currently works for in-field measurements only, but should be updated to
        include both in and out of field measurements

        Parameters
        -----------
        h5_meas_group : h5 group
            Reference to the Measurement group
        aux_file_path : String / Unicode
            Absolute file path of the secondary channel file.
        """
        if self._verbose:
            print('\t---------- Reading Secondary Channel  ----------')
        if isinstance(aux_file_path, (list, tuple)):
            aux_file_paths = aux_file_path
        else:
            aux_file_paths = list(aux_file_path)

        is_in_out_field = 'Field' in self.h5_raw.spec_dim_labels

        if not is_in_out_field and len(aux_file_paths) > 1:
            # TODO: Find a better way to handle this
            warn('\t\tField was not varied but found more than one file for '
                 'secondary channel: {}.\n\t\tResults will be overwritten'
                 ''.format([path.split(item)[-1] for item in aux_file_paths]))
        elif is_in_out_field and len(aux_file_paths) == 1:
            warn('\t\tField was varied but only one data file for secondary '
                 'channel was found. Half the data will be zeros')