# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:12 2015

@author: Suhas Somnath, Stephen Jesse
"""

from __future__ import division, print_function, absolute_import, unicode_literals

from os import path, listdir, remove
import sys
import datetime
from warnings import warn
import h5py
import numpy as np
from scipy.io.matlab import loadmat  # To load parameters stored in Matlab .mat file

from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, \
    createSpecVals, requires_conjugate, generate_bipolar_triangular_waveform, \
    infer_bipolar_triangular_fraction_phase, nf32
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import INDICES_DTYPE, VALUES_DTYPE, Dimension, calc_chunks
from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, write_region_references, \
    create_indexed_group, write_simple_attrs, write_book_keeping_attrs, copy_attributes,\
    write_reduced_anc_dsets, get_unit_values
from pyUSID.io.usi_data import USIDataset
from pyUSID.processing.comp_utils import get_available_memory

if sys.version_info.major == 3:
    unicode = str


class BEodfTranslator(Translator):
    """
    Translates either the Band Excitation (BE) scan or Band Excitation 
    Polarization Switching (BEPS) data format from the old data format(s) to .h5
    """
    def __init__(self, *args, **kwargs):
        super(BEodfTranslator, self).__init__(*args, **kwargs)
        self.h5_raw = None
        self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
        self._cores = kwargs.pop('cores', None)
        self.FFT_BE_wave = None
        self.signal_type = None
        self.expt_type = None
        self._verbose = False

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to raw data file

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """
        if not isinstance(data_path, (str, unicode)):
            raise TypeError('data_path must be a string')

        ndf = 'newdataformat'

        data_path = path.abspath(data_path)

        if path.isfile(data_path):
            ext = data_path.split('.')[-1]
            if ext.lower() not in ['jpg', 'png', 'jpeg', 'tiff', 'mat', 'txt',
                                   'dat', 'xls', 'xlsx']:
                return None
            # we only care about the folder names at this point...
            data_path, _ = path.split(data_path)

        # Check if the data is in the new or old format:
        # Check one level up:
        _, dir_name = path.split(data_path)
        if dir_name == ndf:
            # Though this translator could also read these files, the NDF Translator is more robust...
            return None
        # Check one level down:
        if ndf in listdir(data_path):
            # Though this translator could also read these files, the NDF Translator is more robust...
            return None

        file_path = path.join(data_path, listdir(path=data_path)[0])

        _, path_dict = BEodfTranslator._parse_file_path(file_path)

        if any([x.find('bigtime_0') > 0 and x.endswith('.dat') for x in path_dict.values()]):
            # This is a G-mode Line experiment:
            return None

        parm_found = any([piece in path_dict.keys() for piece in
                          ['parm_txt', 'old_mat_parms']])
        real_found = any([piece in path_dict.keys() for piece in
                          ['read_real', 'write_real']])
        imag_found = any([piece in path_dict.keys() for piece in
                          ['read_imag', 'write_imag']])

        if parm_found and real_found and imag_found:
            if 'parm_txt' in path_dict.keys():
                return path_dict['parm_txt']
            else:
                return path_dict['old_mat_parms']
        else:
            return None

    def translate(self, file_path, show_plots=True, save_plots=True,
                  do_histogram=False, verbose=False):
        """
        Translates .dat data file(s) to a single .h5 file
        
        Parameters
        -------------
        file_path : String / Unicode
            Absolute file path for one of the data files. 
            It is assumed that this file is of the OLD data format.
        show_plots : (optional) Boolean
            Whether or not to show intermediate plots
        save_plots : (optional) Boolean
            Whether or not to save plots to disk
        do_histogram : (optional) Boolean
            Whether or not to construct histograms to visualize data quality. Note - this takes a fair amount of time
        verbose : (optional) Boolean
            Whether or not to print statements
            
        Returns
        ----------
        h5_path : String / Unicode
            Absolute path of the resultant .h5 file
        """
        self._verbose = verbose

        file_path = path.abspath(file_path)
        (folder_path, basename) = path.split(file_path)
        (basename, path_dict) = self._parse_file_path(file_path)

        h5_path = path.join(folder_path, basename + '.h5')
        tot_bins_multiplier = 1
        udvs_denom = 2

        if 'parm_txt' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from text file')
            isBEPS, parm_dict = parmsToDict(path_dict['parm_txt'])

        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from old mat file')
            parm_dict = self._get_parms_from_old_mat(path_dict['old_mat_parms'], verbose=self._verbose)
            isBEPS = parm_dict['VS_steps_per_full_cycle'] != 0
        else:
            raise FileNotFoundError('No parameters file found! Cannot '
                                    'translate this dataset!')

        # Initial text files named some parameters differently:
        for case in [('VS_mode', 'AC modulation mode',
                      'AC modulation mode with time reversal'),
                     ('VS_mode', 'load Arbitrary VS Wave from text file',
                      'load user defined VS Wave from file'),
                     ('BE_phase_content', 'chirp', 'chirp-sinc hybrid'),]:
            key, wrong_val, corr_val = case
            if key not in parm_dict.keys():
                continue
            if parm_dict[key] == wrong_val:
                warn('Updating parameter "{}" from invalid value of "{}" to '
                     '"{}"'.format(key, wrong_val, corr_val))
                parm_dict[key] = corr_val

        # Some .mat files did not set correct values to some parameters:
        for case in [('BE_amplitude_[V]', 1E-2, 0.5151),
                     ('VS_amplitude_[V]', 1E-2, 0.9876)]:
            key, min_val, new_val = case
            if key not in parm_dict.keys():
                continue
            if parm_dict[key] < min_val:
                warn('Updating parameter "{}" from invalid value of {} to {}'
                     ''.format(key, parm_dict[key], new_val))
                parm_dict[key] = new_val

        if self._verbose:
            keys = list(parm_dict.keys())
            keys.sort()
            print('\tExperiment parameters:')
            for key in keys:
                print('\t\t{} : {}'.format(key, parm_dict[key]))

            print('\n\tisBEPS = {}'.format(isBEPS))

        ignored_plt_grps = []
        if isBEPS:
            parm_dict['data_type'] = 'BEPSData'

            field_mode = parm_dict['VS_measure_in_field_loops']
            std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'

            if not std_expt:
                raise ValueError('This translator does not handle user defined voltage spectroscopy')

            spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])

            if parm_dict['VS_mode'] in ['DC modulation mode', 'current mode']:
                if field_mode == 'in and out-of-field':
                    tot_bins_multiplier = 2
                    udvs_denom = 1
                elif field_mode == 'out-of-field':
                    ignored_plt_grps = ['in-field']
                else:
                    ignored_plt_grps = ['out-of-field']
            else:
                tot_bins_multiplier = 1
                udvs_denom = 1

        else:
            spec_label = 'None'
            parm_dict['data_type'] = 'BELineData'

        # Check file sizes:
        if self._verbose:
            print('\tChecking sizes of real and imaginary data files')

        if 'read_real' in path_dict.keys():
            real_size = path.getsize(path_dict['read_real'])
            imag_size = path.getsize(path_dict['read_imag'])
        else:
            real_size = path.getsize(path_dict['write_real'])
            imag_size = path.getsize(path_dict['write_imag'])

        if real_size != imag_size:
            raise ValueError("Real and imaginary file sizes do not match!")

        if real_size == 0:
            raise ValueError('Real and imaginary files were empty')

        # Check here if a second channel for current is present
        # Look for the file containing the current data

        if self._verbose:
            print('\tLooking for secondary channels')
        file_names = listdir(folder_path)
        aux_files = []
        current_data_exists = False
        for fname in file_names:
            if 'AI2' in fname:
                if 'write' in fname:
                    current_file = path.join(folder_path, fname)
                    current_data_exists=True
                aux_files.append(path.join(folder_path, fname))

        add_pix = False
        num_rows = int(parm_dict['grid_num_rows'])
        num_cols = int(parm_dict['grid_num_cols'])
        if self._verbose:
            print('\tRows: {}, Cols: {}'.format(num_rows, num_cols))
        num_pix = num_rows * num_cols
        tot_bins = real_size / (num_pix * 4)
        # Check for case where only a single pixel is missing.
        if num_pix == 1:
            check_bins = real_size / (num_pix * 4)
        else:
            check_bins = real_size / ((num_pix - 1) * 4)

        if self._verbose:
            print('\tChecking bins: Total: {}, actual: {}'.format(tot_bins,
                                                                  check_bins))

        if tot_bins % 1 and check_bins % 1:
            raise ValueError('Aborting! Some parameter appears to have '
                             'changed in-between')
        elif not tot_bins % 1:
            # Everything's ok
            pass
        elif not check_bins % 1:
            tot_bins = check_bins
            warn('Warning:  A pixel seems to be missing from the data. '
                 'File will be padded with zeros.')
            add_pix = True

        tot_bins = int(tot_bins) * tot_bins_multiplier

        if isBEPS:
            if self._verbose:
                print('\tBuilding UDVS table for BEPS')
            UDVS_labs, UDVS_units, UDVS_mat = self._build_udvs_table(parm_dict)

            if self._verbose:
                print('\tTrimming UDVS table to remove unused plot group columns')

            UDVS_mat, UDVS_labs, UDVS_units = trimUDVS(UDVS_mat, UDVS_labs, UDVS_units, ignored_plt_grps)

            old_spec_inds = np.zeros(shape=(2, tot_bins), dtype=INDICES_DTYPE)

            # Will assume that all excitation waveforms have same num of bins
            num_actual_udvs_steps = UDVS_mat.shape[0] / udvs_denom
            bins_per_step = tot_bins / num_actual_udvs_steps
            if self._verbose:
                print('\t# UDVS steps: {}, # bins/step: {}'
                      ''.format(num_actual_udvs_steps, bins_per_step))

            if bins_per_step % 1:
                print('UDVS mat shape: {}, total bins: {}, bins per step: {}'.format(UDVS_mat.shape, tot_bins,
                                                                                     bins_per_step))
                raise ValueError('Non integer number of bins per step!')

            bins_per_step = int(bins_per_step)
            num_actual_udvs_steps = int(num_actual_udvs_steps)

            if len(np.unique(UDVS_mat[:, 2])) == 0:
                raise ValueError('No non-zero rows in AC amplitude')

            stind = 0
            for step_index in range(UDVS_mat.shape[0]):
                if UDVS_mat[step_index, 2] < 1E-3:  # invalid AC amplitude
                    continue
                # Bin step
                old_spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=INDICES_DTYPE)
                # UDVS step
                old_spec_inds[1, stind:stind + bins_per_step] = step_index * np.ones(bins_per_step, dtype=INDICES_DTYPE)
                stind += bins_per_step
            del stind, step_index

        else:  # BE Line
            if self._verbose:
                print('\tPreparing supporting variables since BE-Line')
            self.signal_type = 1
            self.expt_type = 1  # Stephen has not used this index for some reason
            num_actual_udvs_steps = 1
            bins_per_step = tot_bins
            UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
            UDVS_units = ['', 'V', 'A', '', '', '']
            UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
                                dtype=np.float32).reshape(1, len(UDVS_labs))

            old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
                                       np.zeros(tot_bins, dtype=INDICES_DTYPE)))

        if 'parm_mat' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from secondary parameters .mat file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm = self._read_parms_mat(path_dict['parm_mat'], isBEPS)
        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from old .mat parameters file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec = self._read_old_mat_be_vecs(path_dict['old_mat_parms'], verbose=verbose)
        else:
            warn('No secondary parameters file (.mat) provided. Generating '
                 'dummy BE arrays')
            band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
            st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
            en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
            bin_freqs = np.linspace(st_f, en_f, bins_per_step, dtype=np.float32)

            if verbose:
                print('\tGenerating BE arrays of length: '
                      '{}'.format(bins_per_step))
            bin_inds = np.zeros(shape=bins_per_step, dtype=np.int32)
            bin_FFT = np.zeros(shape=bins_per_step, dtype=np.complex64)
            ex_wfm = np.zeros(shape=bins_per_step, dtype=np.float32)

        # Forcing standardized datatypes:
        bin_inds = np.int32(bin_inds)
        bin_freqs = np.float32(bin_freqs)
        bin_FFT = np.complex64(bin_FFT)
        ex_wfm = np.float32(ex_wfm)

        self.FFT_BE_wave = bin_FFT

        # legacy parameters inserted for BEAM
        parm_dict['num_bins'] = tot_bins
        parm_dict['num_pix'] = num_pix
        parm_dict['num_udvs_steps'] = num_actual_udvs_steps
        parm_dict['num_steps'] = num_actual_udvs_steps

        if self._verbose:
            print('\tPreparing UDVS slices for region references')
        udvs_slices = dict()
        for col_ind, col_name in enumerate(UDVS_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))

        # Need to add the Bin Waveform type - infer from UDVS        
        exec_bin_vec = self.signal_type * np.ones(len(bin_inds), dtype=np.int32)

        if self.expt_type == 2:
            if self._verbose:
                print('\tExperiment type = 2. Doubling BE vectors')
            exec_bin_vec = np.hstack((exec_bin_vec, -1 * exec_bin_vec))
            bin_inds = np.hstack((bin_inds, bin_inds))
            bin_freqs = np.hstack((bin_freqs, bin_freqs))
            # This is wrong but I don't know what else to do
            bin_FFT = np.hstack((bin_FFT, bin_FFT))

        # Create Spectroscopic Values and Spectroscopic Values Labels datasets
        # This is an old and legacy way of doing things. Ideally, all we would need to do is just get the unit values
        if self._verbose:
            print('\tCalculating spectroscopic values')
        ret_vals = createSpecVals(UDVS_mat, old_spec_inds, bin_freqs,
                                  exec_bin_vec, parm_dict, UDVS_labs,
                                  UDVS_units, verbose=verbose)
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = ret_vals

        if self._verbose:
            print('\t\tspec_vals_labs: {}'.format(spec_vals_labs))
            unit_vals = get_unit_values(spec_inds, spec_vals,
                                        all_dim_names=spec_vals_labs,
                                        is_spec=True, verbose=False)
            print('\tUnit spectroscopic values')
            for key, val in unit_vals.items():
                print('\t\t{} : length: {}, values:\n\t\t\t{}'.format(key, len(val), val))

        if spec_inds.shape[1] != tot_bins:
            raise ValueError('Second axis of spectroscopic indices: {} not '
                             'matching with second axis of the expected main '
                             'dataset: {}'.format(spec_inds.shape, tot_bins))

        # Not sure what is happening here but this should work.
        spec_dim_dict = dict()
        for entry in spec_vals_labs_names:
            spec_dim_dict[entry[0] + '_parameters'] = entry[1]

        spec_vals_slices = dict()

        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind, row_ind + 1), slice(None))

        if path.exists(h5_path):
            if self._verbose:
                print('\tRemoving existing / old translated file: ' + h5_path)
            remove(h5_path)

        # First create the file
        h5_f = h5py.File(h5_path, mode='w')

        # Then write root level attributes
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        try:
            global_parms['experiment_date'] = parm_dict['File_date_and_time']
        except KeyError:
            global_parms['experiment_date'] = '1:1:1'

        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'ODF'
        if self._verbose:
            print('\tWriting attributes to HDF5 file root')
        write_simple_attrs(h5_f, global_parms)
        write_book_keeping_attrs(h5_f)

        # Then create the measurement group
        h5_meas_group = create_indexed_group(h5_f, 'Measurement')

        # Write attributes at the measurement group level
        if self._verbose:
            print('\twriting attributes to Measurement group')
        write_simple_attrs(h5_meas_group, parm_dict)

        # Create the Channel group
        h5_chan_grp = create_indexed_group(h5_meas_group, 'Channel')

        # Write channel group attributes
        write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1',
                                         'channel_type': 'BE'})

        # Now the datasets!
        if self._verbose:
            print('\tCreating ancillary datasets')
        h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)

        h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
        # TODO: Avoid using region references in USID
        write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=self._verbose)
        write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=False)

        h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])

        h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
                                   dtype=INDICES_DTYPE)

        h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
        h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
        h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)

        if self._verbose:
            print('\tWriting Position datasets')

        pos_dims = [Dimension('X', 'm', np.arange(num_cols)),
                    Dimension('Y', 'm', np.arange(num_rows))]
        h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=self._verbose)
        if self._verbose:
            print('\tPosition datasets of shape: {}'.format(h5_pos_ind.shape))

        if self._verbose:
            print('\tWriting Spectroscopic datasets of shape: {}'.format(spec_inds.shape))
        h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=INDICES_DTYPE)        
        h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=np.array(spec_vals), dtype=VALUES_DTYPE)
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_region_references(dset, spec_vals_slices, add_labels_attr=True, verbose=self._verbose)
            write_simple_attrs(dset, {'units': spec_vals_units}, verbose=False)
            write_simple_attrs(dset, spec_dim_dict)

        # Noise floor should be of shape: (udvs_steps x 3 x positions)
        if self._verbose:
            print('\tWriting noise floor dataset')
        h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
                                   chunks=(1, num_actual_udvs_steps))

        """
        New Method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares
        of UDVS steps by pixels.  N is determined dynamically based on the dimensions of the
        dataset.  Currently it is set such that individual chunks are less than 10kB in size.

        Chris Smith -- csmith55@utk.edu
        """
        BEPS_chunks = calc_chunks([num_pix, tot_bins],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, bins_per_step))
        if self._verbose:
            print('\tHDF5 dataset will have chunks of size: {}'.format(BEPS_chunks))
            print('\tCreating empty main dataset of shape: ({}, {})'.format(num_pix, tot_bins))
        self.h5_raw = write_main_dataset(h5_chan_grp, (num_pix, tot_bins), 'Raw_Data', 'Piezoresponse', 'V', None, None,
                                         dtype=np.complex64, chunks=BEPS_chunks, compression='gzip',
                                         h5_pos_inds=h5_pos_ind, h5_pos_vals=h5_pos_val, h5_spec_inds=h5_spec_inds,
                                         h5_spec_vals=h5_spec_vals, verbose=self._verbose)

        if self._verbose:
            print('\tReading data from binary data files into raw HDF5')
        self._read_data(UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                        add_pix)

        if self._verbose:
            print('\tGenerating plot groups')
        generatePlotGroups(self.h5_raw, self.mean_resp, folder_path, basename,
                           self.max_resp, self.min_resp, max_mem_mb=self.max_ram,
                           spec_label=spec_label, show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=self._verbose)
        if self._verbose:
            print('\tUpgrading to USIDataset')
        self.h5_raw = USIDataset(self.h5_raw)

        # Go ahead and read the current data in the second (current) channel
        if current_data_exists:  # a matching AI2 .dat file was found
            if self._verbose:
                print('\tReading data in secondary channels (current)')
            self._read_secondary_channel(h5_meas_group, aux_files)

        if self._verbose:
            print('\tClosing HDF5 file')
        h5_f.close()

        return h5_path

    def _read_data(self, UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                   add_pix):
        """
        Checks if the data is BEPS or BELine and calls the correct function to read the data from
        file

        Parameters
        ----------
        UDVS_mat : numpy.ndarray of float
            UDVS table
        parm_dict : dict
            Experimental parameters
        path_dict : dict
            Dictionary of data files to be read
        real_size : int
            Size of the real data file in bytes
        isBEPS : boolean
            Is the data BEPS
        add_pix : boolean
            Does the reader need to add extra pixels to the end of the dataset

        Returns
        -------
        None
        """
        # Now read the raw data files:
        if not isBEPS:
            # Do this for all BE-Line (always small enough to read in one shot)
            if self._verbose:
                print('\t\tReading all raw data for BE-Line in one shot')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'out-of-field':
            # Do this for out-of-field BEPS ONLY that is also small (256 MB)
            if self._verbose:
                print('\t\tReading all raw BEPS (out-of-field) data at once')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'in-field':
            # Do this for in-field only
            if self._verbose:
                print('\t\tReading all raw BEPS (in-field only) data at once')
            self._quick_read_data(path_dict['write_real'],
                                  path_dict['write_imag'],
                                  parm_dict['num_udvs_steps'])
        else:
            # Large BEPS datasets OR those with in-and-out of field
            if self._verbose:
                print('\t\tReading all raw data for in-and-out-of-field OR '
                      'very large file one pixel at a time')
            self._read_beps_data(path_dict, UDVS_mat.shape[0],
                                 parm_dict['VS_measure_in_field_loops'],
                                 add_pix)
        self.h5_raw.file.flush()

    def _read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
        """
        Reads the imaginary and real data files pixelwise and writes to the H5 file 
        
        Parameters 
        --------------------
        path_dict : dictionary
            Dictionary containing the absolute paths of the real and imaginary data files
        udvs_steps : unsigned int
            Number of UDVS steps
        mode : String / Unicode
            'in-field', 'out-of-field', or 'in and out-of-field'
        add_pixel : boolean. (Optional; default is False)
            If an empty pixel worth of data should be written to the end             
        
        Returns 
        -------------------- 
        None
        """

        print('---- reading pixel-by-pixel ----------')

        bytes_per_pix = self.h5_raw.shape[1] * 4
        step_size = self.h5_raw.shape[1] / udvs_steps

        if mode == 'out-of-field':
            parsers = [BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in-field':
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in and out-of-field':
            # each file will only have half the udvs steps:
            if 0.5 * udvs_steps % 1:
                raise ValueError('Odd number of UDVS steps')

            udvs_steps = int(0.5 * udvs_steps)
            # be careful - each pair contains only half the necessary bins - so read half
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2)),
                       BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2))]

            if step_size % 1:
                raise ValueError('strange number of bins per UDVS step. Exiting')

            step_size = int(step_size)

        rand_spectra = self._get_random_spectra(parsers, self.h5_raw.shape[0], udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)

        self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
        self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
        self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)

        numpix = self.h5_raw.shape[0]
        """ 
        Don't try to do the last step if a pixel is missing.   
        This will be handled after the loop. 
        """
        if add_pixel:
            numpix -= 1

        for pix_indx in range(numpix):
            if self.h5_raw.shape[0] > 5:
                if pix_indx % int(round(self.h5_raw.shape[0] / 10)) == 0:
                    print('Reading... {} complete'.format(round(100 * pix_indx / self.h5_raw.shape[0])))

            # get the raw stream from each parser
            pxl_data = list()
            for prsr in parsers:
                pxl_data.append(prsr.read_pixel())

            # interleave if both in and out of field
            # we are ignoring user defined possibilities...
            if mode == 'in and out-of-field':
                in_fld = pxl_data[0]
                out_fld = pxl_data[1]

                in_fld_2 = in_fld.reshape(udvs_steps, step_size)
                out_fld_2 = out_fld.reshape(udvs_steps, step_size)
                raw_mat = np.empty((udvs_steps * 2, step_size), dtype=out_fld.dtype)
                raw_mat[0::2, :] = in_fld_2
                raw_mat[1::2, :] = out_fld_2
                raw_vec = raw_mat.reshape(in_fld.size + out_fld.size).transpose()
            else:
                raw_vec = pxl_data[0]  # only one parser
            self.max_resp[pix_indx] = np.max(np.abs(raw_vec))
            self.min_resp[pix_indx] = np.min(np.abs(raw_vec))
            self.mean_resp = (1 / (pix_indx + 1)) * (raw_vec + pix_indx * self.mean_resp)

            if take_conjugate:
                raw_vec = np.conjugate(raw_vec)
            self.h5_raw[pix_indx, :] = np.complex64(raw_vec[:])
            self.h5_raw.file.flush()

        # Add zeros to main_data for the missing pixel. 
        if add_pixel:
            self.h5_raw[-1, :] = 0 + 0j

        print('---- Finished reading files -----')

    def _quick_read_data(self, real_path, imag_path, udvs_steps):
        """
        Reads the real and imaginary data files in one shot, checks whether the
        data needs to be conjugated, and writes the complex data to the HDF5 dataset

        Parameters
        -----------
        real_path : String / Unicode
            Absolute file path of the real data file
        imag_path : String / Unicode
            Absolute file path of the imaginary data file
        udvs_steps : unsigned int
            Number of UDVS steps
        """
        parser = BEodfParser(real_path, imag_path, self.h5_raw.shape[0],
                             self.h5_raw.shape[1] * 4)

        step_size = self.h5_raw.shape[1] / udvs_steps
        rand_spectra = self._get_random_spectra([parser],
                                                self.h5_raw.shape[0],
                                                udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra,
                                                verbose=self._verbose)
        if self._verbose:
            print('\t\t\tChecking if conjugate is required')
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        raw_vec = parser.read_all_data()
        if take_conjugate:
            if self._verbose:
                print('\t'*4 + 'Taking conjugate for positive quality factors')
            raw_vec = np.conjugate(raw_vec)

        if raw_vec.size != np.prod(self.h5_raw.shape):
            percentage_padded = 100 * (np.prod(self.h5_raw.shape) - raw_vec.size) / np.prod(self.h5_raw.shape)
            warn('Warning! Raw data length {} is not matching placeholder length {}. '
                 'Padding zeros for {}% of the data!'.format(raw_vec.size, np.prod(self.h5_raw.shape), percentage_padded))

            padded_raw_vec = np.zeros(np.prod(self.h5_raw.shape), dtype=np.complex64)

            padded_raw_vec[:raw_vec.size] = raw_vec
            raw_mat = padded_raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])
        else:
            raw_mat = raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])

        # Write to the h5 dataset:
        self.mean_resp = np.mean(raw_mat, axis=0)
        self.max_resp = np.amax(np.abs(raw_mat), axis=0)
        self.min_resp = np.amin(np.abs(raw_mat), axis=0)
        self.h5_raw[:, :] = np.complex64(raw_mat)
        self.h5_raw.file.flush()

        print('---- Finished reading files -----')

    @staticmethod
    def _parse_file_path(data_filepath):
        """
        Returns the basename and a dictionary containing the absolute file paths of the
        real and imaginary data files and the text / .mat parameter files
        
        Parameters 
        --------------------
        data_filepath: String / Unicode
            Absolute path of any file in the same directory as the .dat files
        
        Returns 
        --------------------
        basename : String / Unicode
            Basename of the dataset      
        path_dict : Dictionary
            Dictionary containing absolute paths of all necessary data and parameter files
        """
        (folder_path, basename) = path.split(data_filepath)
        (super_folder, basename) = path.split(folder_path)

        if basename.endswith('_d') or basename.endswith('_c'):
            # Old old data format where the folder ended with a _d or _c to denote a completed spectroscopic run
            basename = basename[:-2]
        """
        A single pair of real and imaginary files are / were generated for:
            BE-Line and BEPS (compiled version only generated out-of-field or 'read')
        Two pairs of real and imaginary files were generated for later BEPS datasets
            These have 'read' and 'write' prefixes to denote out or in field respectively
        """
        path_dict = dict()

        for file_name in listdir(folder_path):
            abs_path = path.join(folder_path, file_name)
            if file_name.endswith('.txt') and file_name.find('parm') > 0:
                path_dict['parm_txt'] = abs_path
            elif file_name.find('.mat') > 0:
                if file_name.find('more_parms') > 0:
                    path_dict['parm_mat'] = abs_path
                elif file_name == (basename + '.mat'):
                    path_dict['old_mat_parms'] = abs_path
            elif file_name.endswith('.dat'):
                # Need to account for the second AI channel here
                file_tag = 'read'
                if file_name.find('write') > 0:
                    file_tag = 'write'
                if file_name.find('real') > 0:
                    file_tag += '_real'
                elif file_name.find('imag') > 0:
                    file_tag += '_imag'
                path_dict[file_tag] = abs_path

        return basename, path_dict

    def _read_secondary_channel(self, h5_meas_group, aux_file_path):
        """
        Reads secondary channel stored in AI .mat file
        Currently works for in-field measurements only, but should be updated to
        include both in and out of field measurements

        Parameters
        -----------
        h5_meas_group : h5 group
            Reference to the Measurement group
        aux_file_path : String / Unicode
            Absolute file path of the secondary channel file.
        """
        if self._verbose:
            print('\t---------- Reading Secondary Channel  ----------')
        if isinstance(aux_file_path, (list, tuple)):
            aux_file_paths = aux_file_path
        else:
            # wrap a single file path in a list rather than splitting the string
            aux_file_paths = [aux_file_path]

        is_in_out_field = 'Field' in self.h5_raw.spec_dim_labels

        if not is_in_out_field and len(aux_file_paths) > 1:
            # TODO: Find a better way to handle this
            warn('\t\tField was not varied but found more than one file for '
                 'secondary channel: {}.\n\t\tResults will be overwritten'
                 ''.format([path.split(item)[-1] for item in aux_file_paths]))
        elif is_in_out_field and len(aux_file_paths) == 1:
            warn('\t\tField was varied but only one data file for secondary '
                 'channel was found. Half the data will be zeros')

        spectral_len = 1
        for dim_name, dim_size in zip(self.h5_raw.spec_dim_labels,
                                      self.h5_raw.spec_dim_sizes):
            if dim_name == 'Frequency':
                continue
            spectral_len = spectral_len * dim_size

        num_pix = self.h5_raw.shape[0]
        if self._verbose:
            print('\t\tExpecting this channel to be of shape: ({}, {})'
                  ''.format(num_pix, spectral_len))
            print('\t\tis_in_out_field: {}'.format(is_in_out_field))

        # create a new channel
        h5_current_channel_group = create_indexed_group(h5_meas_group,
                                                        'Channel')

        # Copy attributes from the main channel
        copy_attributes(self.h5_raw.parent, h5_current_channel_group)

        # Modify attributes that are different
        write_simple_attrs(h5_current_channel_group,
                           {'Channel_Input': 'IO_Analog_Input_2',
                            'channel_type': 'Current'},