# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:12 2015

@author: Suhas Somnath, Stephen Jesse
"""

from __future__ import division, print_function, absolute_import, unicode_literals

from os import path, listdir, remove
import sys
import datetime
from warnings import warn
import h5py

import numpy as np
from scipy.io.matlab import loadmat  # To load parameters stored in Matlab .mat file

from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, \
    createSpecVals, requires_conjugate, generate_bipolar_triangular_waveform, \
    infer_bipolar_triangular_fraction_phase, nf32
from pyUSID.io.reg_ref import write_region_references
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import INDICES_DTYPE, VALUES_DTYPE, Dimension, calc_chunks
from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, \
    create_indexed_group, write_simple_attrs, write_book_keeping_attrs, copy_attributes,\
    write_reduced_anc_dsets, get_unit_values
from pyUSID.io.usi_data import USIDataset
from pyUSID.processing.comp_utils import get_available_memory

if sys.version_info.major == 3:
    unicode = str


class BEodfTranslator(Translator):
    """
    Translates either the Band Excitation (BE) scan or Band Excitation 
    Polarization Switching (BEPS) data format from the old data format(s) to .h5
    """

    def __init__(self, *args, **kwargs):
        super(BEodfTranslator, self).__init__(*args, **kwargs)
        self.h5_raw = None
        self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
        self._cores = kwargs.pop('cores', None)
        self.FFT_BE_wave = None
        self.signal_type = None
        self.expt_type = None
        self._verbose = False

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to raw data file

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """
        if not isinstance(data_path, (str, unicode)):
            raise TypeError('data_path must be a string')

        ndf = 'newdataformat'

        data_path = path.abspath(data_path)

        if path.isfile(data_path):
            ext = data_path.split('.')[-1]
            if ext.lower() not in ['jpg', 'png', 'jpeg', 'tiff', 'mat', 'txt',
                                   'dat', 'xls', 'xlsx']:
                return None
            # we only care about the folder names at this point...
            data_path, _ = path.split(data_path)

        # Check if the data is in the new or old format:
        # Check one level up:
        _, dir_name = path.split(data_path)
        if dir_name == ndf:
            # Though this translator could also read these files, the NDF Translator is more robust
            return None
        # Check one level down:
        if ndf in listdir(data_path):
            # Though this translator could also read these files, the NDF Translator is more robust
            return None

        file_path = path.join(data_path, listdir(path=data_path)[0])

        _, path_dict = BEodfTranslator._parse_file_path(file_path)

        if any([x.find('bigtime_0') > 0 and x.endswith('.dat') for x in
                path_dict.values()]):
            # This is a G-mode Line experiment:
            return None

        parm_found = any([piece in path_dict.keys() for piece in
                          ['parm_txt', 'old_mat_parms']])
        real_found = any([piece in path_dict.keys() for piece in
                          ['read_real', 'write_real']])
        imag_found = any([piece in path_dict.keys() for piece in
                          ['read_imag', 'write_imag']])

        if parm_found and real_found and imag_found:
            if 'parm_txt' in path_dict.keys():
                return path_dict['parm_txt']
            else:
                return path_dict['old_mat_parms']
        else:
            return None

    def translate(self, file_path, show_plots=True, save_plots=True,
                  do_histogram=False, verbose=False):
        """
        Translates .dat data file(s) to a single .h5 file

        Parameters
        -------------
        file_path : String / Unicode
            Absolute file path for one of the data files.
            It is assumed that this file is of the OLD data format.
        show_plots : (optional) Boolean
            Whether or not to show intermediate plots
        save_plots : (optional) Boolean
            Whether or not to save plots to disk
        do_histogram : (optional) Boolean
            Whether or not to construct histograms to visualize data quality.
            Note - this takes a fair amount of time
        verbose : (optional) Boolean
            Whether or not to print statements

        Returns
        ----------
        h5_path : String / Unicode
            Absolute path of the resultant .h5 file
        """
        self._verbose = verbose

        file_path = path.abspath(file_path)
        (folder_path, basename) = path.split(file_path)
        (basename, path_dict) = self._parse_file_path(file_path)

        h5_path = path.join(folder_path, basename + '.h5')
        tot_bins_multiplier = 1
        udvs_denom = 2

        if 'parm_txt' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from text file')
            isBEPS, parm_dict = parmsToDict(path_dict['parm_txt'])

        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from old mat file')
            parm_dict = self._get_parms_from_old_mat(path_dict['old_mat_parms'], verbose=self._verbose)
            if parm_dict['VS_steps_per_full_cycle'] == 0:
                isBEPS = False
            else:
                isBEPS = True
        else:
            raise FileNotFoundError('No parameters file found! Cannot '
                                    'translate this dataset!')

        # Initial text files named some parameters differently:
        for case in [('VS_mode', 'AC modulation mode',
                      'AC modulation mode with time reversal'),
                     ('VS_mode', 'load Arbitrary VS Wave from text file',
                      'load user defined VS Wave from file'),
                     ('BE_phase_content', 'chirp', 'chirp-sinc hybrid'),]:
            key, wrong_val, corr_val = case
            if key not in parm_dict.keys():
                continue
            if parm_dict[key] == wrong_val:
                warn('Updating parameter "{}" from invalid value of "{}" to '
                     '"{}"'.format(key, wrong_val, corr_val))
                parm_dict[key] = corr_val

        # Some .mat files did not set correct values to some parameters:
        for case in [('BE_amplitude_[V]', 1E-2, 0.5151),
                     ('VS_amplitude_[V]', 1E-2, 0.9876)]:
            key, min_val, new_val = case
            if key not in parm_dict.keys():
                continue
            if parm_dict[key] < min_val:
                warn('Updating parameter "{}" from invalid value of {} to {}'
                     ''.format(key, parm_dict[key], new_val))
                parm_dict[key] = new_val

        if self._verbose:
            keys = list(parm_dict.keys())
            keys.sort()
            print('\tExperiment parameters:')
            for key in keys:
                print('\t\t{} : {}'.format(key, parm_dict[key]))

            print('\n\tisBEPS = {}'.format(isBEPS))

        ignored_plt_grps = []
        if isBEPS:
            parm_dict['data_type'] = 'BEPSData'

            field_mode = parm_dict['VS_measure_in_field_loops']
            std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'

            if not std_expt:
                raise ValueError('This translator does not handle user defined voltage spectroscopy')

            spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])

            if parm_dict['VS_mode'] in ['DC modulation mode', 'current mode']:
                if field_mode == 'in and out-of-field':
                    tot_bins_multiplier = 2
                    udvs_denom = 1
                else:
                    if field_mode == 'out-of-field':
                        ignored_plt_grps = ['in-field']
                    else:
                        ignored_plt_grps = ['out-of-field']
            else:
                tot_bins_multiplier = 1
                udvs_denom = 1

        else:
            spec_label = 'None'
            parm_dict['data_type'] = 'BELineData'

        # Check file sizes:
        if self._verbose:
            print('\tChecking sizes of real and imaginary data files')

        if 'read_real' in path_dict.keys():
            real_size = path.getsize(path_dict['read_real'])
            imag_size = path.getsize(path_dict['read_imag'])
        else:
            real_size = path.getsize(path_dict['write_real'])
            imag_size = path.getsize(path_dict['write_imag'])

        if real_size != imag_size:
            raise ValueError("Real and imaginary file sizes do not match!")

        if real_size == 0:
            raise ValueError('Real and imaginary files were empty')

        # Check here if a second channel for current is present
        # Look for the file containing the current data

        if self._verbose:
            print('\tLooking for secondary channels')
        file_names = listdir(folder_path)
        aux_files = []
        current_data_exists = False
        for fname in file_names:
            if 'AI2' in fname:
                if 'write' in fname:
                    current_file = path.join(folder_path, fname)
                    current_data_exists = True
                aux_files.append(path.join(folder_path, fname))

        add_pix = False
        num_rows = int(parm_dict['grid_num_rows'])
        num_cols = int(parm_dict['grid_num_cols'])
        if self._verbose:
            print('\tRows: {}, Cols: {}'.format(num_rows, num_cols))
        num_pix = num_rows * num_cols
        tot_bins = real_size / (num_pix * 4)
        # Check for case where only a single pixel is missing.
        if num_pix == 1:
            check_bins = real_size / (num_pix * 4)
        else:
            check_bins = real_size / ((num_pix - 1) * 4)
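        # Explanatory note (added, not from the original): tot_bins assumes every
        # pixel was written, while check_bins assumes exactly one pixel is missing.
        # For example, if the file holds data for only 99 of the 100 pixels of a
        # 10 x 10 grid, tot_bins typically comes out fractional while check_bins
        # is an integer, which flags the missing pixel below.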

        if self._verbose:
            print('\tChecking bins: Total: {}, actual: {}'.format(tot_bins,
                                                                  check_bins))

        if tot_bins % 1 and check_bins % 1:
            raise ValueError('Aborting! Some parameter appears to have '
                             'changed in-between')
        elif not tot_bins % 1:
            # Everything's ok
            pass
        elif not check_bins % 1:
            tot_bins = check_bins
            warn('Warning:  A pixel seems to be missing from the data. '
                 'File will be padded with zeros.')
            add_pix = True

        # This would crash and fail later if not fixed here
        # I don't like this hacky approach to solve this problem
        if isBEPS and tot_bins % 1 == 0 and parm_dict['VS_mode'] != 'Custom':
            bins_per_step = parm_dict['FORC_num_of_FORC_cycles'] * \
                            parm_dict['VS_number_of_cycles'] * \
                            parm_dict['VS_steps_per_full_cycle'] * \
                            parm_dict['BE_bins_per_read']
            if verbose:
                print('\t\tNumber of bins per step: calculated: {}, actual {}'
                      ''.format(bins_per_step, tot_bins))
            if bins_per_step < tot_bins and tot_bins / bins_per_step % 1 == 0:
                scale = int(tot_bins / bins_per_step)
                warn('Number of actual ({}) bins per step {}X larger than '
                     'calculated ({}) values. Will scale VS cycles to get '
                     'number of bins to match'
                     ''.format(tot_bins, scale, bins_per_step))
                parm_dict['VS_number_of_cycles'] *= scale
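        # Illustrative example of the scaling above (added, hypothetical numbers):
        # if the file size implies tot_bins = 40000 while the parameters only
        # account for a bins_per_step product of 20000, then scale = 2 and
        # 'VS_number_of_cycles' is doubled so the two totals agree.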

        tot_bins = int(tot_bins) * tot_bins_multiplier

        if isBEPS:
            if self._verbose:
                print('\tBuilding UDVS table for BEPS')
            UDVS_labs, UDVS_units, UDVS_mat = self._build_udvs_table(parm_dict)

            if self._verbose:
                print('\tTrimming UDVS table to remove unused plot group columns')

            UDVS_mat, UDVS_labs, UDVS_units = trimUDVS(UDVS_mat, UDVS_labs, UDVS_units, ignored_plt_grps)

            old_spec_inds = np.zeros(shape=(2, tot_bins), dtype=INDICES_DTYPE)

            # Will assume that all excitation waveforms have same num of bins
            num_actual_udvs_steps = UDVS_mat.shape[0] / udvs_denom
            bins_per_step = tot_bins / num_actual_udvs_steps
            if self._verbose:
                print('\t# UDVS steps: {}, # bins/step: {}'
                      ''.format(num_actual_udvs_steps, bins_per_step))

            if bins_per_step % 1:
                print('UDVS mat shape: {}, total bins: {}, bins per step: {}'.format(UDVS_mat.shape, tot_bins,
                                                                                     bins_per_step))
                raise ValueError('Non integer number of bins per step!')

            bins_per_step = int(bins_per_step)
            num_actual_udvs_steps = int(num_actual_udvs_steps)

            if len(np.unique(UDVS_mat[:, 2])) == 0:
                raise ValueError('No non-zero rows in AC amplitude')

            stind = 0
            for step_index in range(UDVS_mat.shape[0]):
                if UDVS_mat[step_index, 2] < 1E-3:  # invalid AC amplitude
                    continue
                # Bin step
                old_spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=INDICES_DTYPE)
                # UDVS step
                old_spec_inds[1, stind:stind + bins_per_step] = step_index * np.ones(bins_per_step, dtype=INDICES_DTYPE)
                stind += bins_per_step
            del stind, step_index
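            # Layout sketch for old_spec_inds (added comment, hypothetical sizes):
            # with bins_per_step = 3 and two valid UDVS steps, row 0 holds the bin
            # index within each step [0 1 2 0 1 2] and row 1 holds the UDVS step
            # index [0 0 0 1 1 1].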

        else:  # BE Line
            if self._verbose:
                print('\tPreparing supporting variables since BE-Line')
            self.signal_type = 1
            self.expt_type = 1  # Stephen has not used this index for some reason
            num_actual_udvs_steps = 1
            bins_per_step = tot_bins
            UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
            UDVS_units = ['', 'V', 'A', '', '', '']
            UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
                                dtype=np.float32).reshape(1, len(UDVS_labs))

            old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
                                       np.zeros(tot_bins, dtype=INDICES_DTYPE)))

        if 'parm_mat' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from secondary parameters .mat file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm = self._read_parms_mat(path_dict['parm_mat'], isBEPS)
        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from old-style .mat parameters file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec = self._read_old_mat_be_vecs(path_dict['old_mat_parms'], verbose=verbose)
        else:
            warn('No secondary parameters file (.mat) provided. Generating '
                 'dummy BE arrays')
            band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
            st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
            en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
            bin_freqs = np.linspace(st_f, en_f, bins_per_step, dtype=np.float32)

            if verbose:
                print('\tGenerating BE arrays of length: '
                      '{}'.format(bins_per_step))
            bin_inds = np.zeros(shape=bins_per_step, dtype=np.int32)
            bin_FFT = np.zeros(shape=bins_per_step, dtype=np.complex64)
            ex_wfm = np.zeros(shape=bins_per_step, dtype=np.float32)

        # Forcing standardized datatypes:
        bin_inds = np.int32(bin_inds)
        bin_freqs = np.float32(bin_freqs)
        bin_FFT = np.complex64(bin_FFT)
        ex_wfm = np.float32(ex_wfm)

        self.FFT_BE_wave = bin_FFT

        # legacy parameters inserted for BEAM
        parm_dict['num_bins'] = tot_bins
        parm_dict['num_pix'] = num_pix
        parm_dict['num_udvs_steps'] = num_actual_udvs_steps
        parm_dict['num_steps'] = num_actual_udvs_steps

        if self._verbose:
            print('\tPreparing UDVS slices for region references')
        udvs_slices = dict()
        for col_ind, col_name in enumerate(UDVS_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))

        # Need to add the Bin Waveform type - infer from UDVS
        exec_bin_vec = self.signal_type * np.ones(len(bin_inds), dtype=np.int32)

        if self.expt_type == 2:
            if self._verbose:
                print('\tExperiment type = 2. Doubling BE vectors')
            exec_bin_vec = np.hstack((exec_bin_vec, -1 * exec_bin_vec))
            bin_inds = np.hstack((bin_inds, bin_inds))
            bin_freqs = np.hstack((bin_freqs, bin_freqs))
            # This is wrong but I don't know what else to do
            bin_FFT = np.hstack((bin_FFT, bin_FFT))

        # Create Spectroscopic Values and Spectroscopic Values Labels datasets
        # This is an old and legacy way of doing things. Ideally, all we would need to do is just get the unit values
        if self._verbose:
            print('\tCalculating spectroscopic values')
        ret_vals = createSpecVals(UDVS_mat, old_spec_inds, bin_freqs,
                                  exec_bin_vec, parm_dict, UDVS_labs,
                                  UDVS_units, verbose=verbose)
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = ret_vals

        if self._verbose:
            print('\t\tspec_vals_labs: {}'.format(spec_vals_labs))
            unit_vals = get_unit_values(spec_inds, spec_vals,
                                        all_dim_names=spec_vals_labs,
                                        is_spec=True, verbose=False)
            print('\tUnit spectroscopic values')
            for key, val in unit_vals.items():
                print('\t\t{} : length: {}, values:\n\t\t\t{}'.format(key, len(val), val))

        if spec_inds.shape[1] != tot_bins:
            raise ValueError('Second axis of spectroscopic indices: {} not '
                             'matching with second axis of the expected main '
                             'dataset: {}'.format(spec_inds.shape, tot_bins))

        # Not sure what is happening here but this should work.
        spec_dim_dict = dict()
        for entry in spec_vals_labs_names:
            spec_dim_dict[entry[0] + '_parameters'] = entry[1]

        spec_vals_slices = dict()

        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind, row_ind + 1), slice(None))

        if path.exists(h5_path):
            if self._verbose:
                print('\tRemoving existing / old translated file: ' + h5_path)
            remove(h5_path)

        # First create the file
        h5_f = h5py.File(h5_path, mode='w')

        # Then write root level attributes
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        try:
            global_parms['experiment_date'] = parm_dict['File_date_and_time']
        except KeyError:
            global_parms['experiment_date'] = '1:1:1'

        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'ODF'
        if self._verbose:
            print('\tWriting attributes to HDF5 file root')
        write_simple_attrs(h5_f, global_parms)
        write_book_keeping_attrs(h5_f)

        # Then create the measurement group
        h5_meas_group = create_indexed_group(h5_f, 'Measurement')

        # Write attributes at the measurement group level
        if self._verbose:
            print('\twriting attributes to Measurement group')
        write_simple_attrs(h5_meas_group, parm_dict)

        # Create the Channel group
        h5_chan_grp = create_indexed_group(h5_meas_group, 'Channel')

        # Write channel group attributes
        write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1',
                                         'channel_type': 'BE'})

        # Now the datasets!
        if self._verbose:
            print('\tCreating ancillary datasets')
        h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)

        h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
        # TODO: Avoid using region references in USID
        write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=self._verbose)
        write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=False)

        h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])

        h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
                                   dtype=INDICES_DTYPE)

        h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
        h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
        h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)

        if self._verbose:
            print('\tWriting Position datasets')

        pos_dims = [Dimension('X', 'm', np.arange(num_cols)),
                    Dimension('Y', 'm', np.arange(num_rows))]
        h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=self._verbose)
        if self._verbose:
            print('\tPosition datasets of shape: {}'.format(h5_pos_ind.shape))

        if self._verbose:
            print('\tWriting Spectroscopic datasets of shape: {}'.format(spec_inds.shape))
        h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=INDICES_DTYPE)
        h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=np.array(spec_vals), dtype=VALUES_DTYPE)
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_region_references(dset, spec_vals_slices, add_labels_attr=True, verbose=self._verbose)
            write_simple_attrs(dset, {'units': spec_vals_units}, verbose=False)
            write_simple_attrs(dset, spec_dim_dict)

        # Noise floor should be of shape: (udvs_steps x 3 x positions)
        if self._verbose:
            print('\tWriting noise floor dataset')
        h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
                                   chunks=(1, num_actual_udvs_steps))

        """
        New Method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares
        of UDVS steps by pixels.  N is determined dynamically based on the dimensions of the
        dataset.  Currently it is set such that individual chunks are less than 10kB in size.

        Chris Smith -- csmith55@utk.edu
        """
        BEPS_chunks = calc_chunks([num_pix, tot_bins],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, bins_per_step))
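        # Rough illustration of the chunking above (added, hypothetical numbers):
        # a unit chunk of (1, bins_per_step) complex64 values occupies
        # 8 * bins_per_step bytes (~1.3 kB for 165 bins), which calc_chunks may
        # grow by integer factors while staying under the ~10 kB per-chunk target
        # mentioned in the note above.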
        if self._verbose:
            print('\tHDF5 dataset will have chunks of size: {}'.format(BEPS_chunks))
            print('\tCreating empty main dataset of shape: ({}, {})'.format(num_pix, tot_bins))
        self.h5_raw = write_main_dataset(h5_chan_grp, (num_pix, tot_bins), 'Raw_Data', 'Piezoresponse', 'V', None, None,
                                         dtype=np.complex64, chunks=BEPS_chunks, compression='gzip',
                                         h5_pos_inds=h5_pos_ind, h5_pos_vals=h5_pos_val, h5_spec_inds=h5_spec_inds,
                                         h5_spec_vals=h5_spec_vals, verbose=self._verbose)

        if self._verbose:
            print('\tReading data from binary data files into raw HDF5')
        self._read_data(UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                        add_pix)

        if self._verbose:
            print('\tGenerating plot groups')
        generatePlotGroups(self.h5_raw, self.mean_resp, folder_path, basename,
                           self.max_resp, self.min_resp, max_mem_mb=self.max_ram,
                           spec_label=spec_label, show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=self._verbose)
        if self._verbose:
            print('\tUpgrading to USIDataset')
        self.h5_raw = USIDataset(self.h5_raw)

        # Go ahead and read the current data in the second (current) channel
        if current_data_exists:  # If a .dat file matches
            if self._verbose:
                print('\tReading data in secondary channels (current)')
            self._read_secondary_channel(h5_meas_group, aux_files)

        if self._verbose:
            print('\tClosing HDF5 file')
        h5_f.close()

        return h5_path

    def _read_data(self, UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                   add_pix):
        """
        Checks if the data is BEPS or BELine and calls the correct function to read the data from
        file

        Parameters
        ----------
        UDVS_mat : numpy.ndarray of float
            UDVS table
        parm_dict : dict
            Experimental parameters
        path_dict : dict
            Dictionary of data files to be read
        real_size : unsigned int
            Size (in bytes) of each data file
        isBEPS : boolean
            Is the data BEPS
        add_pix : boolean
            Does the reader need to add extra pixels to the end of the dataset

        Returns
        -------
        None
        """
        # Now read the raw data files:
        if not isBEPS:
            # Do this for all BE-Line (always small enough to read in one shot)
            if self._verbose:
                print('\t\tReading all raw data for BE-Line in one shot')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'out-of-field':
            # Do this for out-of-field BEPS ONLY that is also small (256 MB)
            if self._verbose:
                print('\t\tReading all raw BEPS (out-of-field) data at once')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'in-field':
            # Do this for in-field only
            if self._verbose:
                print('\t\tReading all raw BEPS (in-field only) data at once')
            self._quick_read_data(path_dict['write_real'],
                                  path_dict['write_imag'],
                                  parm_dict['num_udvs_steps'])
        else:
            # Large BEPS datasets OR those with in-and-out of field
            if self._verbose:
                print('\t\tReading all raw data for in-and-out-of-field OR '
                      'very large file one pixel at a time')
            self._read_beps_data(path_dict, UDVS_mat.shape[0],
                                 parm_dict['VS_measure_in_field_loops'],
                                 add_pix)
        self.h5_raw.file.flush()

    def _read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
        """
        Reads the imaginary and real data files pixelwise and writes to the H5 file 
        
        Parameters 
        --------------------
        path_dict : dictionary
            Dictionary containing the absolute paths of the real and imaginary data files
        udvs_steps : unsigned int
            Number of UDVS steps
        mode : String / Unicode
            'in-field', 'out-of-field', or 'in and out-of-field'
        add_pixel : boolean. (Optional; default is False)
            If an empty pixel worth of data should be written to the end             
        
        Returns 
        -------------------- 
        None
        """

        print('---- reading pixel-by-pixel ----------')

        bytes_per_pix = self.h5_raw.shape[1] * 4
        step_size = self.h5_raw.shape[1] / udvs_steps

        if mode == 'out-of-field':
            parsers = [BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in-field':
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in and out-of-field':
            # each file will only have half the udvs steps:
            if 0.5 * udvs_steps % 1:
                raise ValueError('Odd number of UDVS')

            udvs_steps = int(0.5 * udvs_steps)
            # be careful - each pair contains only half the necessary bins - so read half
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2)),
                       BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2))]

            if step_size % 1:
                raise ValueError('strange number of bins per UDVS step. Exiting')

            step_size = int(step_size)

        rand_spectra = self._get_random_spectra(parsers, self.h5_raw.shape[0], udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
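        # Added note: the random subset of spectra gathered above is what
        # requires_conjugate() uses to decide whether the raw data must be
        # complex-conjugated (cf. the 'positive quality factors' message in
        # _quick_read_data); sampling keeps this check cheap for large files.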

        self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
        self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
        self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)

        numpix = self.h5_raw.shape[0]
        """
        Don't try to do the last step if a pixel is missing.
        This will be handled after the loop.
        """
        if add_pixel:
            numpix -= 1

        for pix_indx in range(numpix):
            if self.h5_raw.shape[0] > 5:
                if pix_indx % int(round(self.h5_raw.shape[0] / 10)) == 0:
                    print('Reading... {} complete'.format(round(100 * pix_indx / self.h5_raw.shape[0])))

            # get the raw stream from each parser
            pxl_data = list()
            for prsr in parsers:
                pxl_data.append(prsr.read_pixel())

            # interleave if both in and out of field
            # we are ignoring user defined possibilities...
            if mode == 'in and out-of-field':
                in_fld = pxl_data[0]
                out_fld = pxl_data[1]

                in_fld_2 = in_fld.reshape(udvs_steps, step_size)
                out_fld_2 = out_fld.reshape(udvs_steps, step_size)
                raw_mat = np.empty((udvs_steps * 2, step_size), dtype=out_fld.dtype)
                raw_mat[0::2, :] = in_fld_2
                raw_mat[1::2, :] = out_fld_2
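                # Interleaving sketch (added comment): for udvs_steps = 2 the rows
                # end up ordered [in_0, out_0, in_1, out_1], i.e. in-field and
                # out-of-field data alternate step by step before being flattened.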
                raw_vec = raw_mat.reshape(in_fld.size + out_fld.size).transpose()
            else:
                raw_vec = pxl_data[0]  # only one parser
            self.max_resp[pix_indx] = np.max(np.abs(raw_vec))
            self.min_resp[pix_indx] = np.min(np.abs(raw_vec))
            self.mean_resp = (1 / (pix_indx + 1)) * (raw_vec + pix_indx * self.mean_resp)
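            # Added note: this is an incremental (running) average,
            # mean_(n+1) = (raw + n * mean_n) / (n + 1), so all pixels never
            # need to be held in memory at once.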

            if take_conjugate:
                raw_vec = np.conjugate(raw_vec)
            self.h5_raw[pix_indx, :] = np.complex64(raw_vec[:])
            self.h5_raw.file.flush()

        # Add zeros to main_data for the missing pixel.
        if add_pixel:
            self.h5_raw[-1, :] = 0 + 0j

        print('---- Finished reading files -----')

    def _quick_read_data(self, real_path, imag_path, udvs_steps):
        """
        Reads the real and imaginary data files in one shot, checks whether the
        data needs to be conjugated and writes it to the HDF5 dataset

        Parameters
        -----------
        real_path : String / Unicode
            Absolute file path of the real data file
        imag_path : String / Unicode
            Absolute file path of the imaginary data file
        udvs_steps : unsigned int
            Number of UDVS steps
        """
        parser = BEodfParser(real_path, imag_path, self.h5_raw.shape[0],
                             self.h5_raw.shape[1] * 4)

        step_size = self.h5_raw.shape[1] / udvs_steps
        rand_spectra = self._get_random_spectra([parser],
                                                self.h5_raw.shape[0],
                                                udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra,
                                                verbose=self._verbose)
        if self._verbose:
            print('\t\t\tChecking if conjugate is required')
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        raw_vec = parser.read_all_data()
        if take_conjugate:
            if self._verbose:
                print('\t'*4 + 'Taking conjugate for positive quality factors')
            raw_vec = np.conjugate(raw_vec)

        if raw_vec.shape[0] != np.prod(self.h5_raw.shape):
            percentage_padded = 100 * (np.prod(self.h5_raw.shape) - raw_vec.shape[0]) / np.prod(self.h5_raw.shape)
            warn('Warning! Raw data length {} is not matching placeholder length {}. '
                 'Padding zeros for {}% of the data!'.format(raw_vec.shape[0], np.prod(self.h5_raw.shape), percentage_padded))

            padded_raw_vec = np.zeros(np.prod(self.h5_raw.shape), dtype=np.complex64)

            padded_raw_vec[:raw_vec.shape[0]] = raw_vec
            raw_mat = padded_raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])
        else:
            raw_mat = raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])

        # Write to the h5 dataset:
        self.mean_resp = np.mean(raw_mat, axis=0)
        self.max_resp = np.amax(np.abs(raw_mat), axis=0)
        self.min_resp = np.amin(np.abs(raw_mat), axis=0)
        self.h5_raw[:, :] = np.complex64(raw_mat)
        self.h5_raw.file.flush()

        print('---- Finished reading files -----')

    @staticmethod
    def _parse_file_path(data_filepath):
        """
        Returns the basename and a dictionary containing the absolute file paths for the
        real and imaginary data files, text and mat parameter files in a dictionary
        
        Parameters 
        --------------------
        data_filepath: String / Unicode
            Absolute path of any file in the same directory as the .dat files
        
        Returns 
        --------------------
        basename : String / Unicode
            Basename of the dataset      
        path_dict : Dictionary
            Dictionary containing absolute paths of all necessary data and parameter files
        """
        (folder_path, basename) = path.split(data_filepath)
        (super_folder, basename) = path.split(folder_path)

        if basename.endswith('_d') or basename.endswith('_c'):
            # Old old data format where the folder ended with a _d or _c to denote a completed spectroscopic run
            basename = basename[:-2]
        """
        A single pair of real and imaginary files are / were generated for:
            BE-Line and BEPS (compiled version only generated out-of-field or 'read')
        Two pairs of real and imaginary files were generated for later BEPS datasets
            These have 'read' and 'write' prefixes to denote out or in field respectively
        """
        path_dict = dict()

        for file_name in listdir(folder_path):
            abs_path = path.join(folder_path, file_name)
            if file_name.endswith('.txt') and file_name.find('parm') > 0:
                path_dict['parm_txt'] = abs_path
            elif file_name.find('.mat') > 0:
                if file_name.find('more_parms') > 0:
                    path_dict['parm_mat'] = abs_path
                elif file_name == (basename + '.mat'):
                    path_dict['old_mat_parms'] = abs_path
            elif file_name.endswith('.dat'):
                # Need to account for the second AI channel here
                file_tag = 'read'
                if file_name.find('write') > 0:
                    file_tag = 'write'
                if file_name.find('real') > 0:
                    file_tag += '_real'
                elif file_name.find('imag') > 0:
                    file_tag += '_imag'
                path_dict[file_tag] = abs_path

        return basename, path_dict

    def _read_secondary_channel(self, h5_meas_group, aux_file_path):
        """
        Reads secondary channel stored in AI .mat file
        Currently works for in-field measurements only, but should be updated to
        include both in and out of field measurements

        Parameters
        -----------
        h5_meas_group : h5 group
            Reference to the Measurement group
        aux_file_path : String / Unicode
            Absolute file path of the secondary channel file.
        """
        if self._verbose:
            print('\t---------- Reading Secondary Channel  ----------')
        if isinstance(aux_file_path, (list, tuple)):
            aux_file_paths = aux_file_path
        else:
            aux_file_paths = [aux_file_path]

        is_in_out_field = 'Field' in self.h5_raw.spec_dim_labels

        if not is_in_out_field and len(aux_file_paths) > 1:
            # TODO: Find a better way to handle this
            warn('\t\tField was not varied but found more than one file for '
                 'secondary channel: {}.\n\t\tResults will be overwritten'
                 ''.format([path.split(item)[-1] for item in aux_file_paths]))
        elif is_in_out_field and len(aux_file_paths) == 1:
            warn('\t\tField was varied but only one data file for secondary '
                 'channel was found. Half the data will be zeros')

        spectral_len = 1
        for dim_name, dim_size in zip(self.h5_raw.spec_dim_labels,
                                      self.h5_raw.spec_dim_sizes):
            if dim_name == 'Frequency':
                continue
            spectral_len = spectral_len * dim_size
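        # Added note: spectral_len is the product of every spectroscopic dimension
        # except Frequency; e.g. (hypothetical) Frequency: 87, DC_Offset: 64,
        # Field: 2 would give spectral_len = 128.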

        num_pix = self.h5_raw.shape[0]
        if