# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:12 2015

@author: Suhas Somnath, Stephen Jesse
"""

from __future__ import division, print_function, absolute_import, unicode_literals

from os import path, listdir, remove
import sys
import datetime
from warnings import warn
import h5py

import numpy as np
from scipy.io.matlab import loadmat  # To load parameters stored in Matlab .mat file

from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, \
    createSpecVals, requires_conjugate, generate_bipolar_triangular_waveform, \
    infer_bipolar_triangular_fraction_phase, nf32
from pyUSID.io.reg_ref import write_region_references
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import INDICES_DTYPE, VALUES_DTYPE, Dimension, calc_chunks
from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, \
    create_indexed_group, write_simple_attrs, write_book_keeping_attrs, copy_attributes,\
    write_reduced_anc_dsets, get_unit_values
from pyUSID.io.usi_data import USIDataset
from pyUSID.processing.comp_utils import get_available_memory

if sys.version_info.major == 3:
    unicode = str


class BEodfTranslator(Translator):
    """
    Translates either the Band Excitation (BE) scan or Band Excitation 
    Polarization Switching (BEPS) data format from the old data format(s) to .h5
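
    Examples
    --------
    A minimal usage sketch; the file path below is hypothetical and should
    point to one of the raw data / parameter files of an actual measurement::

        >>> tran = BEodfTranslator()
        >>> h5_path = tran.translate('/path/to/experiment/my_beps_parm.txt')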
    """
    def __init__(self, *args, **kwargs):
        super(BEodfTranslator, self).__init__(*args, **kwargs)
        self.h5_raw = None
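        # Optional keyword arguments: 'num_rand_spectra' controls how many
        # randomly chosen spectra are inspected when deciding whether the raw
        # data needs to be conjugated, and 'cores' limits the CPU cores used
        # for that check.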
        self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
        self._cores = kwargs.pop('cores', None)
        self.FFT_BE_wave = None
        self.signal_type = None
        self.expt_type = None
        self._verbose = False

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to raw data file

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """
        if not isinstance(data_path, (str, unicode)):
            raise TypeError('data_path must be a string')

        ndf = 'newdataformat'

        data_path = path.abspath(data_path)

        if path.isfile(data_path):
            ext = data_path.split('.')[-1]
            if ext.lower() not in ['jpg', 'png', 'jpeg', 'tiff', 'mat', 'txt',
                                   'dat', 'xls', 'xlsx']:
                return None
            # we only care about the folder names at this point...
            data_path, _ = path.split(data_path)

        # Check if the data is in the new or old format:
        # Check one level up:
        _, dir_name = path.split(data_path)
        if dir_name == ndf:
            # This translator could also read these files, but the NDF translator is more robust
            return None
        # Check one level down:
        if ndf in listdir(data_path):
            # This translator could also read these files, but the NDF translator is more robust
            return None

        file_path = path.join(data_path, listdir(path=data_path)[0])

        _, path_dict = BEodfTranslator._parse_file_path(file_path)

        if any([x.find('bigtime_0') > 0 and x.endswith('.dat') for x in
                path_dict.values()]):
            # This is a G-mode Line experiment:
            return None

        parm_found = any([piece in path_dict.keys() for piece in
                          ['parm_txt', 'old_mat_parms']])
        real_found = any([piece in path_dict.keys() for piece in
                          ['read_real', 'write_real']])
        imag_found = any([piece in path_dict.keys() for piece in
                          ['read_imag', 'write_imag']])

        if parm_found and real_found and imag_found:
            if 'parm_txt' in path_dict.keys():
                return path_dict['parm_txt']
            else:
                return path_dict['old_mat_parms']
        else:
            return None

    def translate(self, file_path, show_plots=True, save_plots=True,
                  do_histogram=False, verbose=False):
        """
        Translates .dat data file(s) to a single .h5 file
        
        Parameters
        -------------
        file_path : String / Unicode
            Absolute file path for one of the data files. 
            It is assumed that this file is of the OLD data format.
        show_plots : (optional) Boolean
            Whether or not to show intermediate plots
        save_plots : (optional) Boolean
            Whether or not to save plots to disk
        do_histogram : (optional) Boolean
            Whether or not to construct histograms to visualize data quality. Note - this takes a fair amount of time
        verbose : (optional) Boolean
            Whether or not to print statements
            
        Returns
        ----------
        h5_path : String / Unicode
            Absolute path of the resultant .h5 file
        """
        self._verbose = verbose

        file_path = path.abspath(file_path)
        (folder_path, basename) = path.split(file_path)
        (basename, path_dict) = self._parse_file_path(file_path)

        h5_path = path.join(folder_path, basename + '.h5')
        tot_bins_multiplier = 1
        udvs_denom = 2

        if 'parm_txt' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from text file')
            isBEPS, parm_dict = parmsToDict(path_dict['parm_txt'])

        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from old mat file')
            parm_dict = self._get_parms_from_old_mat(path_dict['old_mat_parms'], verbose=self._verbose)
            if parm_dict['VS_steps_per_full_cycle'] == 0:
                isBEPS=False
            else:
                isBEPS=True
        else:
            raise FileNotFoundError('No parameters file found! Cannot '
                                    'translate this dataset!')

        # Initial text files named some parameters differently:
        for case in [('VS_mode', 'AC modulation mode',
                      'AC modulation mode with time reversal'),
                     ('VS_mode', 'load Arbitrary VS Wave from text file',
                      'load user defined VS Wave from file'),
                     ('BE_phase_content', 'chirp', 'chirp-sinc hybrid'),]:
            key, wrong_val, corr_val = case
            if key not in parm_dict.keys():
                continue
            if parm_dict[key] == wrong_val:
                warn('Updating parameter "{}" from invalid value of "{}" to '
                     '"{}"'.format(key, wrong_val, corr_val))
                parm_dict[key] = corr_val

        # Some .mat files did not set correct values to some parameters:
        for case in [('BE_amplitude_[V]', 1E-2, 0.5151),
                     ('VS_amplitude_[V]', 1E-2, 0.9876)]:
            key, min_val, new_val = case
            if key not in parm_dict.keys():
                continue
            if parm_dict[key] < min_val:
                warn('Updating parameter "{}" from invalid value of {} to {}'
                     ''.format(key, parm_dict[key], new_val))
                parm_dict[key] = new_val

        if self._verbose:
            keys = list(parm_dict.keys())
            keys.sort()
            print('\tExperiment parameters:')
            for key in keys:
                print('\t\t{} : {}'.format(key, parm_dict[key]))

            print('\n\tisBEPS = {}'.format(isBEPS))

        ignored_plt_grps = []
        if isBEPS:
            parm_dict['data_type'] = 'BEPSData'

            field_mode = parm_dict['VS_measure_in_field_loops']
            std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'

            if not std_expt:
                raise ValueError('This translator does not handle user defined voltage spectroscopy')

            spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])

            if parm_dict['VS_mode'] in ['DC modulation mode', 'current mode']:
                if field_mode == 'in and out-of-field':
                    tot_bins_multiplier = 2
                    udvs_denom = 1
                else:
                    if field_mode == 'out-of-field':
                        ignored_plt_grps = ['in-field']
                    else:
                        ignored_plt_grps = ['out-of-field']
            else:
                tot_bins_multiplier = 1
                udvs_denom = 1
        else:
            spec_label = 'None'
            parm_dict['data_type'] = 'BELineData'

        # Check file sizes:
        if self._verbose:
            print('\tChecking sizes of real and imaginary data files')

        if 'read_real' in path_dict.keys():
            real_size = path.getsize(path_dict['read_real'])
            imag_size = path.getsize(path_dict['read_imag'])
        else:
            real_size = path.getsize(path_dict['write_real'])
            imag_size = path.getsize(path_dict['write_imag'])

        if real_size != imag_size:
            raise ValueError("Real and imaginary file sizes do not match!")

        if real_size == 0:
            raise ValueError('Real and imaginary files were empty')

        # Check here if a second channel for current is present
        # Look for the file containing the current data

        if self._verbose:
            print('\tLooking for secondary channels')
        file_names = listdir(folder_path)
        aux_files = []
        current_data_exists = False
        for fname in file_names:
            if 'AI2' in fname:
                if 'write' in fname:
                    current_file = path.join(folder_path, fname)
                    current_data_exists=True
                aux_files.append(path.join(folder_path, fname))

        add_pix = False
        num_rows = int(parm_dict['grid_num_rows'])
        num_cols = int(parm_dict['grid_num_cols'])
        if self._verbose:
            print('\tRows: {}, Cols: {}'.format(num_rows, num_cols))
        num_pix = num_rows * num_cols
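        # Each file stores one float32 (4 bytes) per bin per pixel, so the
        # total number of bins can be inferred from the file size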
        tot_bins = real_size / (num_pix * 4)
        # Check for case where only a single pixel is missing.
        if num_pix == 1:
            check_bins = real_size / (num_pix * 4)
        else:
            check_bins = real_size / ((num_pix - 1) * 4)

        if self._verbose:
            print('\tChecking bins: Total: {}, actual: {}'.format(tot_bins,
                                                                  check_bins))

        if tot_bins % 1 and check_bins % 1:
            raise ValueError('Aborting! Some parameter appears to have '
                             'changed in-between')
        elif not tot_bins % 1:
            # Everything's ok
            pass
        elif not check_bins % 1:
            tot_bins = check_bins
            warn('Warning:  A pixel seems to be missing from the data. '
                 'File will be padded with zeros.')
            add_pix = True

        tot_bins = int(tot_bins) * tot_bins_multiplier

        if isBEPS:
            if self._verbose:
                print('\tBuilding UDVS table for BEPS')
            UDVS_labs, UDVS_units, UDVS_mat = self._build_udvs_table(parm_dict)

            if self._verbose:
                print('\tTrimming UDVS table to remove unused plot group columns')

            UDVS_mat, UDVS_labs, UDVS_units = trimUDVS(UDVS_mat, UDVS_labs, UDVS_units, ignored_plt_grps)

            old_spec_inds = np.zeros(shape=(2, tot_bins), dtype=INDICES_DTYPE)
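            # Row 0 holds the bin index within each UDVS step; row 1 holds the
            # index of the UDVS step itself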

            # Will assume that all excitation waveforms have same num of bins
            num_actual_udvs_steps = UDVS_mat.shape[0] / udvs_denom
            bins_per_step = tot_bins / num_actual_udvs_steps
            if self._verbose:
                print('\t# UDVS steps: {}, # bins/step: {}'
                      ''.format(num_actual_udvs_steps, bins_per_step))

            if bins_per_step % 1:
                print('UDVS mat shape: {}, total bins: {}, bins per step: {}'.format(UDVS_mat.shape, tot_bins,
                                                                                     bins_per_step))
                raise ValueError('Non integer number of bins per step!')

            bins_per_step = int(bins_per_step)
            num_actual_udvs_steps = int(num_actual_udvs_steps)

            if not np.any(UDVS_mat[:, 2] > 1E-3):
                raise ValueError('No non-zero rows in AC amplitude')

            stind = 0
            for step_index in range(UDVS_mat.shape[0]):
                if UDVS_mat[step_index, 2] < 1E-3:  # invalid AC amplitude
                    continue
                # Bin step
                old_spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=INDICES_DTYPE)
                # UDVS step
                old_spec_inds[1, stind:stind + bins_per_step] = step_index * np.ones(bins_per_step, dtype=INDICES_DTYPE)
                stind += bins_per_step
            del stind, step_index

        else:  # BE Line
            if self._verbose:
                print('\tPreparing supporting variables since BE-Line')
            self.signal_type = 1
            self.expt_type = 1  # Stephen has not used this index for some reason
            num_actual_udvs_steps = 1
            bins_per_step = tot_bins
            UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
            UDVS_units = ['', 'V', 'A', '', '', '']
            UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
                                dtype=np.float32).reshape(1, len(UDVS_labs))

            old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
                                       np.zeros(tot_bins, dtype=INDICES_DTYPE)))

        if 'parm_mat' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from parameters text file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm = self._read_parms_mat(path_dict['parm_mat'], isBEPS)
        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from old mat text file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec = self._read_old_mat_be_vecs(path_dict['old_mat_parms'], verbose=verbose)
        else:
            warn('No secondary parameters file (.mat) provided. Generating '
                 'dummy BE arrays')
            band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
            st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
            en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
            bin_freqs = np.linspace(st_f, en_f, bins_per_step, dtype=np.float32)

            if verbose:
                print('\tGenerating BE arrays of length: '
                      '{}'.format(bins_per_step))
            bin_inds = np.zeros(shape=bins_per_step, dtype=np.int32)
            bin_FFT = np.zeros(shape=bins_per_step, dtype=np.complex64)
            ex_wfm = np.zeros(shape=bins_per_step, dtype=np.float32)

        # Forcing standardized datatypes:
        bin_inds = np.int32(bin_inds)
        bin_freqs = np.float32(bin_freqs)
        bin_FFT = np.complex64(bin_FFT)
        ex_wfm = np.float32(ex_wfm)

        self.FFT_BE_wave = bin_FFT

        # legacy parameters inserted for BEAM
        parm_dict['num_bins'] = tot_bins
        parm_dict['num_pix'] = num_pix
        parm_dict['num_udvs_steps'] = num_actual_udvs_steps
        parm_dict['num_steps'] = num_actual_udvs_steps

        if self._verbose:
            print('\tPreparing UDVS slices for region references')
        udvs_slices = dict()
        for col_ind, col_name in enumerate(UDVS_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))

        # Need to add the Bin Waveform type - infer from UDVS
        exec_bin_vec = self.signal_type * np.ones(len(bin_inds), dtype=np.int32)

        if self.expt_type == 2:
            if self._verbose:
                print('\tExperiment type = 2. Doubling BE vectors')
            exec_bin_vec = np.hstack((exec_bin_vec, -1 * exec_bin_vec))
            bin_inds = np.hstack((bin_inds, bin_inds))
            bin_freqs = np.hstack((bin_freqs, bin_freqs))
            # This is wrong but I don't know what else to do
            bin_FFT = np.hstack((bin_FFT, bin_FFT))
Unknown's avatar
Unknown committed
407

Somnath, Suhas's avatar
Somnath, Suhas committed
408
        # Create Spectroscopic Values and Spectroscopic Values Labels datasets
409
        # This is an old and legacy way of doing things. Ideally, all we would need ot do is just get the unit values
ssomnath's avatar
ssomnath committed
410
        if self._verbose:
411
            print('\tCalculating spectroscopic values')
ssomnath's avatar
ssomnath committed
412
413
414
415
        ret_vals = createSpecVals(UDVS_mat, old_spec_inds, bin_freqs,
                                  exec_bin_vec, parm_dict, UDVS_labs,
                                  UDVS_units, verbose=verbose)
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = ret_vals

        if self._verbose:
            print('\t\tspec_vals_labs: {}'.format(spec_vals_labs))
            unit_vals = get_unit_values(spec_inds, spec_vals,
                                        all_dim_names=spec_vals_labs,
                                        is_spec=True, verbose=False)
            print('\tUnit spectroscopic values')
            for key, val in unit_vals.items():
                print('\t\t{} : length: {}, values:\n\t\t\t{}'.format(key, len(val), val))

        if spec_inds.shape[1] != tot_bins:
            raise ValueError('Second axis of spectroscopic indices: {} not '
                             'matching with second axis of the expected main '
                             'dataset: {}'.format(spec_inds.shape, tot_bins))
        # Not sure what is happening here but this should work.
        spec_dim_dict = dict()
        for entry in spec_vals_labs_names:
            spec_dim_dict[entry[0] + '_parameters'] = entry[1]
        spec_vals_slices = dict()

        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind, row_ind + 1), slice(None))

        if path.exists(h5_path):
            if self._verbose:
                print('\tRemoving existing / old translated file: ' + h5_path)
            remove(h5_path)

        # First create the file
        h5_f = h5py.File(h5_path, mode='w')

        # Then write root level attributes
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        try:
            global_parms['experiment_date'] = parm_dict['File_date_and_time']
        except KeyError:
            global_parms['experiment_date'] = '1:1:1'

        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'ODF'
        if self._verbose:
            print('\tWriting attributes to HDF5 file root')
        write_simple_attrs(h5_f, global_parms)
        write_book_keeping_attrs(h5_f)

        # Then create the measurement group
        h5_meas_group = create_indexed_group(h5_f, 'Measurement')

        # Write attributes at the measurement group level
        if self._verbose:
            print('\twriting attributes to Measurement group')
        write_simple_attrs(h5_meas_group, parm_dict)

        # Create the Channel group
        h5_chan_grp = create_indexed_group(h5_meas_group, 'Channel')

        # Write channel group attributes
        write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1',
                                         'channel_type': 'BE'})

        # Now the datasets!
        if self._verbose:
            print('\tCreating ancillary datasets')
        h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)

        h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
        # TODO: Avoid using region references in USID
        write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=self._verbose)
        write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=False)

        h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])

        h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
                                   dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
        h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
        h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)

        if self._verbose:
            print('\tWriting Position datasets')

        pos_dims = [Dimension('X', 'm', np.arange(num_cols)),
                    Dimension('Y', 'm', np.arange(num_rows))]
        h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=self._verbose)
        if self._verbose:
            print('\tPosition datasets of shape: {}'.format(h5_pos_ind.shape))

        if self._verbose:
            print('\tWriting Spectroscopic datasets of shape: {}'.format(spec_inds.shape))
        h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=INDICES_DTYPE)        
        h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=np.array(spec_vals), dtype=VALUES_DTYPE)
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_region_references(dset, spec_vals_slices, add_labels_attr=True, verbose=self._verbose)
            write_simple_attrs(dset, {'units': spec_vals_units}, verbose=False)
            write_simple_attrs(dset, spec_dim_dict)

        # Noise floor should be of shape: (udvs_steps x 3 x positions)
        if self._verbose:
            print('\tWriting noise floor dataset')
        h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
                                   chunks=(1, num_actual_udvs_steps))

        """
        New Method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares
        of UDVS steps by pixels.  N is determined dynamically based on the dimensions of the
        dataset.  Currently it is set such that individual chunks are less than 10kB in size.

        Chris Smith -- csmith55@utk.edu
        """
        BEPS_chunks = calc_chunks([num_pix, tot_bins],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, bins_per_step))
        if self._verbose:
            print('\tHDF5 dataset will have chunks of size: {}'.format(BEPS_chunks))
            print('\tCreating empty main dataset of shape: ({}, {})'.format(num_pix, tot_bins))
        self.h5_raw = write_main_dataset(h5_chan_grp, (num_pix, tot_bins), 'Raw_Data', 'Piezoresponse', 'V', None, None,
                                         dtype=np.complex64, chunks=BEPS_chunks, compression='gzip',
                                         h5_pos_inds=h5_pos_ind, h5_pos_vals=h5_pos_val, h5_spec_inds=h5_spec_inds,
                                         h5_spec_vals=h5_spec_vals, verbose=self._verbose)

        if self._verbose:
            print('\tReading data from binary data files into raw HDF5')
        self._read_data(UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                        add_pix)

        if self._verbose:
            print('\tGenerating plot groups')
        generatePlotGroups(self.h5_raw, self.mean_resp, folder_path, basename,
                           self.max_resp, self.min_resp, max_mem_mb=self.max_ram,
                           spec_label=spec_label, show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=self._verbose)
        if self._verbose:
            print('\tUpgrading to USIDataset')
        self.h5_raw = USIDataset(self.h5_raw)

        # Go ahead and read the current data stored in the second channel
        if current_data_exists:  # an AI2 .dat file was found
            if self._verbose:
                print('\tReading data in secondary channels (current)')
            self._read_secondary_channel(h5_meas_group, aux_files)

        if self._verbose:
            print('\tClosing HDF5 file')
        h5_f.close()

        return h5_path

    def _read_data(self, UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                   add_pix):
        """
        Checks if the data is BEPS or BELine and calls the correct function to read the data from
        file

        Parameters
        ----------
        UDVS_mat : numpy.ndarray of float
            UDVS table
        parm_dict : dict
            Experimental parameters
        path_dict : dict
            Dictionary of data files to be read
        real_size : dict
            Size of each data file
        isBEPS : boolean
            Is the data BEPS
        add_pix : boolean
            Does the reader need to add extra pixels to the end of the dataset

        Returns
        -------
        None
        """
        # Now read the raw data files:
        if not isBEPS:
            # Do this for all BE-Line (always small enough to read in one shot)
            if self._verbose:
                print('\t\tReading all raw data for BE-Line in one shot')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'out-of-field':
            # Do this for out-of-field BEPS ONLY that is also small (256 MB)
            if self._verbose:
                print('\t\tReading all raw BEPS (out-of-field) data at once')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'in-field':
            # Do this for in-field only
            if self._verbose:
                print('\t\tReading all raw BEPS (in-field only) data at once')
            self._quick_read_data(path_dict['write_real'],
                                  path_dict['write_imag'],
                                  parm_dict['num_udvs_steps'])
        else:
            # Large BEPS datasets OR those with in-and-out of field
            if self._verbose:
                print('\t\tReading all raw data for in-and-out-of-field OR '
                      'very large file one pixel at a time')
            self._read_beps_data(path_dict, UDVS_mat.shape[0],
                                 parm_dict['VS_measure_in_field_loops'],
                                 add_pix)
        self.h5_raw.file.flush()

    def _read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
        """
        Reads the imaginary and real data files pixelwise and writes to the H5 file 
        
        Parameters 
        --------------------
        path_dict : dictionary
            Dictionary containing the absolute paths of the real and imaginary data files
        udvs_steps : unsigned int
            Number of UDVS steps
        mode : String / Unicode
            'in-field', 'out-of-field', or 'in and out-of-field'
        add_pixel : boolean. (Optional; default is False)
            If an empty pixel worth of data should be written to the end             
        
        Returns 
        -------------------- 
        None
        """

        print('---- reading pixel-by-pixel ----------')

        bytes_per_pix = self.h5_raw.shape[1] * 4
        step_size = self.h5_raw.shape[1] / udvs_steps

        if mode == 'out-of-field':
            parsers = [BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in-field':
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in and out-of-field':
            # each file will only have half the udvs steps:
            if 0.5 * udvs_steps % 1:
                raise ValueError('Odd number of UDVS steps')

            udvs_steps = int(0.5 * udvs_steps)
            # be careful - each pair contains only half the necessary bins - so read half
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2)),
                       BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2))]

            if step_size % 1:
                raise ValueError('Non-integer number of bins per UDVS step')

            step_size = int(step_size)

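        # A random subset of spectra is examined to decide whether the raw
        # data must be conjugated so that quality factors come out positive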
        rand_spectra = self._get_random_spectra(parsers, self.h5_raw.shape[0], udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)

        self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
        self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
        self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)

        numpix = self.h5_raw.shape[0]
        """ 
        Don't try to do the last step if a pixel is missing.   
        This will be handled after the loop. 
        """
        if add_pixel:
            numpix -= 1

        for pix_indx in range(numpix):
            if self.h5_raw.shape[0] > 5:
                if pix_indx % int(round(self.h5_raw.shape[0] / 10)) == 0:
                    print('Reading... {} complete'.format(round(100 * pix_indx / self.h5_raw.shape[0])))

            # get the raw stream from each parser
            pxl_data = list()
            for prsr in parsers:
                pxl_data.append(prsr.read_pixel())

            # interleave if both in and out of field
            # we are ignoring user defined possibilities...
            if mode == 'in and out-of-field':
                in_fld = pxl_data[0]
                out_fld = pxl_data[1]

                in_fld_2 = in_fld.reshape(udvs_steps, step_size)
                out_fld_2 = out_fld.reshape(udvs_steps, step_size)
                raw_mat = np.empty((udvs_steps * 2, step_size), dtype=out_fld.dtype)
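                # Even rows hold the in-field steps, odd rows the out-of-field steps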
                raw_mat[0::2, :] = in_fld_2
                raw_mat[1::2, :] = out_fld_2
                raw_vec = raw_mat.reshape(in_fld.size + out_fld.size).transpose()
            else:
                raw_vec = pxl_data[0]  # only one parser
            self.max_resp[pix_indx] = np.max(np.abs(raw_vec))
            self.min_resp[pix_indx] = np.min(np.abs(raw_vec))
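            # Running average of the complex response across all pixels read so far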
            self.mean_resp = (1 / (pix_indx + 1)) * (raw_vec + pix_indx * self.mean_resp)

            if take_conjugate:
                raw_vec = np.conjugate(raw_vec)
            self.h5_raw[pix_indx, :] = np.complex64(raw_vec[:])
            self.h5_raw.file.flush()

        # Add zeros to main_data for the missing pixel.
        if add_pixel:
            self.h5_raw[-1, :] = 0 + 0j

        print('---- Finished reading files -----')

    def _quick_read_data(self, real_path, imag_path, udvs_steps):
        """
        Reads the real and imaginary data files in one shot, conjugates the
        data if necessary, and writes it to the HDF5 dataset

        Parameters
        -----------
        real_path : String / Unicode
            Absolute file path of the real data file
        imag_path : String / Unicode
            Absolute file path of the imaginary data file
        udvs_steps : unsigned int
            Number of UDVS steps
        """
        parser = BEodfParser(real_path, imag_path, self.h5_raw.shape[0],
                             self.h5_raw.shape[1] * 4)

        step_size = self.h5_raw.shape[1] / udvs_steps
        rand_spectra = self._get_random_spectra([parser],
                                                self.h5_raw.shape[0],
                                                udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra,
                                                verbose=self._verbose)
        if self._verbose:
            print('\t\t\tChecking if conjugate is required')
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        raw_vec = parser.read_all_data()
        if take_conjugate:
            if self._verbose:
                print('\t'*4 + 'Taking conjugate for positive quality factors')
            raw_vec = np.conjugate(raw_vec)

        if raw_vec.shape[0] != np.prod(self.h5_raw.shape):
            percentage_padded = 100 * (np.prod(self.h5_raw.shape) - raw_vec.shape[0]) / np.prod(self.h5_raw.shape)
            warn('Raw data length {} does not match placeholder length {}. '
                 'Padding zeros for {}% of the data!'.format(raw_vec.shape[0], np.prod(self.h5_raw.shape), percentage_padded))

            padded_raw_vec = np.zeros(np.prod(self.h5_raw.shape), dtype=np.complex64)

            padded_raw_vec[:raw_vec.shape[0]] = raw_vec
            raw_mat = padded_raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])
        else:
            raw_mat = raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])

        # Write to the h5 dataset:
        self.mean_resp = np.mean(raw_mat, axis=0)
        self.max_resp = np.amax(np.abs(raw_mat), axis=0)
        self.min_resp = np.amin(np.abs(raw_mat), axis=0)
        self.h5_raw[:, :] = np.complex64(raw_mat)
        self.h5_raw.file.flush()

        print('---- Finished reading files -----')

    @staticmethod
    def _parse_file_path(data_filepath):
        """
        Returns the basename and a dictionary containing the absolute file paths for the
        real and imaginary data files, text and mat parameter files in a dictionary
        
        Parameters 
        --------------------
        data_filepath: String / Unicode
            Absolute path of any file in the same directory as the .dat files
        
        Returns 
        --------------------
        basename : String / Unicode
            Basename of the dataset      
        path_dict : Dictionary
            Dictionary containing absolute paths of all necessary data and parameter files
        """
        (folder_path, basename) = path.split(data_filepath)
        (super_folder, basename) = path.split(folder_path)
        if basename.endswith('_d') or basename.endswith('_c'):
            # Old old data format where the folder ended with a _d or _c to denote a completed spectroscopic run
            basename = basename[:-2]
        """
        A single pair of real and imaginary files are / were generated for:
            BE-Line and BEPS (compiled version only generated out-of-field or 'read')
        Two pairs of real and imaginary files were generated for later BEPS datasets
            These have 'read' and 'write' prefixes to denote out or in field respectively
        """
        path_dict = dict()

        for file_name in listdir(folder_path):
            abs_path = path.join(folder_path, file_name)
            if file_name.endswith('.txt') and file_name.find('parm') > 0:
                path_dict['parm_txt'] = abs_path
            elif file_name.find('.mat') > 0:
                if file_name.find('more_parms') > 0:
                    path_dict['parm_mat'] = abs_path
                elif file_name == (basename + '.mat'):
                    path_dict['old_mat_parms'] = abs_path
            elif file_name.endswith('.dat'):
                # Need to account for the second AI channel here
                file_tag = 'read'
                if file_name.find('write') > 0:
                    file_tag = 'write'
                if file_name.find('real') > 0:
                    file_tag += '_real'
                elif file_name.find('imag') > 0:
                    file_tag += '_imag'
                path_dict[file_tag] = abs_path
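                # typically yields keys like 'read_real', 'read_imag', 'write_real', 'write_imag'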

        return basename, path_dict

    def _read_secondary_channel(self, h5_meas_group, aux_file_path):
        """
        Reads secondary channel stored in AI .mat file
        Currently works for in-field measurements only, but should be updated to
        include both in and out of field measurements

        Parameters
        -----------
        h5_meas_group : h5 group
            Reference to the Measurement group
        aux_file_path : String / Unicode
            Absolute file path of the secondary channel file.
        """
        if self._verbose:
            print('\t---------- Reading Secondary Channel  ----------')
        if isinstance(aux_file_path, (list, tuple)):
            aux_file_paths = aux_file_path
        else:
            aux_file_paths = [aux_file_path]

        is_in_out_field = 'Field' in self.h5_raw.spec_dim_labels

        if not is_in_out_field and len(aux_file_paths) > 1:
            # TODO: Find a better way to handle this
            warn('\t\tField was not varied but found more than one file for '
                 'secondary channel: {}.\n\t\tResults will be overwritten'
                 ''.format([path.split(item)[-1] for item in aux_file_paths]))
        elif is_in_out_field and len(aux_file_paths) == 1:
            warn('\t\tField was varied but only one data file for secondary '
                 'channel was found. Half the data will be zeros')

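        # Expected spectral length of the secondary channel: the product of
        # all spectroscopic dimensions except the Frequency bins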
        spectral_len = 1
        for dim_name, dim_size in zip(self.h5_raw.spec_dim_labels,
                                      self.h5_raw.spec_dim_sizes):
            if dim_name == 'Frequency':
                continue
            spectral_len = spectral_len * dim_size
        num_pix = self.h5_raw.shape[0]
        if self._verbose:
            print('\t\tExpecting this channel to be of shape: ({}, {})'
                  ''.format(num_pix, spectral_len))
            print('\t\tis_in_out_field: {}'.format(is_in_out_field))

        # create a new channel
        h5_current_channel_group = create_indexed_group(h5_meas_group,
                                                        'Channel')

        # Copy attributes from the main channel
        copy_attributes(self.h5_raw.parent, h5_current_channel_group)