# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:12 2015

@author: Suhas Somnath, Stephen Jesse
"""

from __future__ import division, print_function, absolute_import, unicode_literals

from os import path, listdir, remove
import sys
import datetime
from warnings import warn
import h5py

import numpy as np
from scipy.io.matlab import loadmat  # To load parameters stored in Matlab .mat file

from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, \
    createSpecVals, requires_conjugate, generate_bipolar_triangular_waveform, \
    infer_bipolar_triangular_fraction_phase, nf32
from pyUSID.io.reg_ref import write_region_references
from pyUSID.io.translator import Translator
from pyUSID.io.write_utils import INDICES_DTYPE, VALUES_DTYPE, Dimension, calc_chunks
from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, \
    create_indexed_group, write_simple_attrs, write_book_keeping_attrs, copy_attributes,\
    write_reduced_anc_dsets, get_unit_values
from pyUSID.io.usi_data import USIDataset
from pyUSID.processing.comp_utils import get_available_memory

if sys.version_info.major == 3:
    unicode = str

class BEodfTranslator(Translator):
    """
    Translates either the Band Excitation (BE) scan or Band Excitation 
    Polarization Switching (BEPS) data format from the old data format(s) to .h5
    """
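    # Example usage (a minimal sketch; the import path and the example file
    # path below are illustrative assumptions, not part of this module):
    #
    #     tran = BEodfTranslator()
    #     if BEodfTranslator.is_valid_file('/path/to/data/parm.txt') is not None:
    #         h5_path = tran.translate('/path/to/data/parm.txt',
    #                                  show_plots=False, save_plots=False)
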
    def __init__(self, *args, **kwargs):
        super(BEodfTranslator, self).__init__(*args, **kwargs)
        self.h5_raw = None
        self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
        self._cores = kwargs.pop('cores', None)
        self.FFT_BE_wave = None
        self.signal_type = None
        self.expt_type = None
        self._verbose = False

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to raw data file

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """
        if not isinstance(data_path, (str, unicode)):
            raise TypeError('data_path must be a string')

        ndf = 'newdataformat'

        data_path = path.abspath(data_path)

        if path.isfile(data_path):
            ext = data_path.split('.')[-1]
            if ext.lower() not in ['jpg', 'png', 'jpeg', 'tiff', 'mat', 'txt',
                                   'dat', 'xls', 'xlsx']:
                return None
            # we only care about the folder names at this point...
            data_path, _ = path.split(data_path)

        # Check if the data is in the new or old format:
        # Check one level up:
        _, dir_name = path.split(data_path)
        if dir_name == ndf:
            # Though this translator could also read these files, the NDF translator is more robust...
            return None
        # Check one level down:
        if ndf in listdir(data_path):
            # Though this translator could also read these files, the NDF translator is more robust...
            return None

        file_path = path.join(data_path, listdir(path=data_path)[0])
        _, path_dict = BEodfTranslator._parse_file_path(file_path)
        if any([x.find('bigtime_0') > 0 and x.endswith('.dat') for x in
                path_dict.values()]):
            # This is a G-mode Line experiment:
            return None

        parm_found = any([piece in path_dict.keys() for piece in
                          ['parm_txt', 'old_mat_parms']])
        real_found = any([piece in path_dict.keys() for piece in
                          ['read_real', 'write_real']])
        imag_found = any([piece in path_dict.keys() for piece in
                          ['read_imag', 'write_imag']])

        if parm_found and real_found and imag_found:
            if 'parm_txt' in path_dict.keys():
                return path_dict['parm_txt']
            else:
                return path_dict['old_mat_parms']
        else:
            return None

    def translate(self, file_path, show_plots=True, save_plots=True,
                  do_histogram=False, verbose=False):
        """
        Translates .dat data file(s) to a single .h5 file
        
        Parameters
        -------------
        file_path : String / Unicode
            Absolute file path for one of the data files. 
            It is assumed that this file is of the OLD data format.
        show_plots : (optional) Boolean
            Whether or not to show intermediate plots
        save_plots : (optional) Boolean
            Whether or not to save plots to disk
        do_histogram : (optional) Boolean
            Whether or not to construct histograms to visualize data quality. Note - this takes a fair amount of time
        verbose : (optional) Boolean
            Whether or not to print statements
            
        Returns
        ----------
        h5_path : String / Unicode
            Absolute path of the resultant .h5 file
        """
        self._verbose = verbose

        file_path = path.abspath(file_path)
        (folder_path, basename) = path.split(file_path)
        (basename, path_dict) = self._parse_file_path(file_path)

        h5_path = path.join(folder_path, basename + '.h5')
        tot_bins_multiplier = 1
        udvs_denom = 2

        if 'parm_txt' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from text file')
            isBEPS, parm_dict = parmsToDict(path_dict['parm_txt'])

        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from old mat file')
            parm_dict = self._get_parms_from_old_mat(path_dict['old_mat_parms'], verbose=self._verbose)
            isBEPS = parm_dict['VS_steps_per_full_cycle'] != 0
        else:
            raise FileNotFoundError('No parameters file found! Cannot '
                                    'translate this dataset!')

        # Initial text files named some parameters differently:
        for case in [('VS_mode', 'AC modulation mode',
                      'AC modulation mode with time reversal'),
                     ('VS_mode', 'load Arbitrary VS Wave from text file',
                      'load user defined VS Wave from file'),
                     ('BE_phase_content', 'chirp', 'chirp-sinc hybrid'),]:
            key, wrong_val, corr_val = case
            if key not in parm_dict.keys():
                continue
            if parm_dict[key] == wrong_val:
                warn('Updating parameter "{}" from invalid value of "{}" to '
                     '"{}"'.format(key, wrong_val, corr_val))
                parm_dict[key] = corr_val

        # Some .mat files did not set correct values to some parameters:
        for case in [('BE_amplitude_[V]', 1E-2, 0.5151),
                     ('VS_amplitude_[V]', 1E-2, 0.9876)]:
            key, min_val, new_val = case
            if key not in parm_dict.keys():
                continue
            if parm_dict[key] < min_val:
                warn('Updating parameter "{}" from invalid value of {} to {}'
                     ''.format(key, parm_dict[key], new_val))
                parm_dict[key] = new_val

        if self._verbose:
            keys = list(parm_dict.keys())
            keys.sort()
            print('\tExperiment parameters:')
            for key in keys:
                print('\t\t{} : {}'.format(key, parm_dict[key]))

            print('\n\tisBEPS = {}'.format(isBEPS))
        ignored_plt_grps = []
        if isBEPS:
            parm_dict['data_type'] = 'BEPSData'

            field_mode = parm_dict['VS_measure_in_field_loops']
            std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'

            if not std_expt:
                raise ValueError('This translator does not handle user defined voltage spectroscopy')

            spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])

            if parm_dict['VS_mode'] in ['DC modulation mode', 'current mode']:
                if field_mode == 'in and out-of-field':
                    tot_bins_multiplier = 2
                    udvs_denom = 1
                elif field_mode == 'out-of-field':
                    ignored_plt_grps = ['in-field']
                else:
                    ignored_plt_grps = ['out-of-field']
            else:
                tot_bins_multiplier = 1
                udvs_denom = 1

        else:
            spec_label = 'None'
            parm_dict['data_type'] = 'BELineData'

        # Check file sizes:
        if self._verbose:
            print('\tChecking sizes of real and imaginary data files')

        if 'read_real' in path_dict.keys():
            real_size = path.getsize(path_dict['read_real'])
            imag_size = path.getsize(path_dict['read_imag'])
        else:
            real_size = path.getsize(path_dict['write_real'])
            imag_size = path.getsize(path_dict['write_imag'])

        if real_size != imag_size:
            raise ValueError("Real and imaginary file sizes do not match!")

        if real_size == 0:
            raise ValueError('Real and imaginary files were empty')

        # Check here if a second channel for current is present
        # Look for the file containing the current data

        if self._verbose:
            print('\tLooking for secondary channels')
        file_names = listdir(folder_path)
        aux_files = []
        current_data_exists = False
        for fname in file_names:
            if 'AI2' in fname:
                if 'write' in fname:
                    current_file = path.join(folder_path, fname)
                    current_data_exists = True
                aux_files.append(path.join(folder_path, fname))

        add_pix = False
        num_rows = int(parm_dict['grid_num_rows'])
        num_cols = int(parm_dict['grid_num_cols'])
        if self._verbose:
            print('\tRows: {}, Cols: {}'.format(num_rows, num_cols))
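        # Note: each raw data file stores one float32 (4 bytes) per bin per pixel, so
        # dividing the file size by (num_pix * 4) gives the number of bins per pixel.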
        num_pix = num_rows * num_cols
        tot_bins = real_size / (num_pix * 4)
        # Check for case where only a single pixel is missing.
        if num_pix == 1:
            check_bins = real_size / (num_pix * 4)
        else:
            check_bins = real_size / ((num_pix - 1) * 4)

        if self._verbose:
            print('\tChecking bins: Total: {}, actual: {}'.format(tot_bins,
                                                                  check_bins))

        if tot_bins % 1 and check_bins % 1:
            raise ValueError('Aborting! Some parameter appears to have '
                             'changed in-between')
        elif not tot_bins % 1:
            # Everything's ok
            pass
        elif not check_bins % 1:
            tot_bins = check_bins
            warn('Warning:  A pixel seems to be missing from the data. '
                 'File will be padded with zeros.')
            add_pix = True

        # This would crash and fail later if not fixed here
        # I don't like this hacky approach to solve this problem
        if isBEPS and tot_bins % 1 == 0 and parm_dict['VS_mode'] != 'Custom':
            bins_per_step = parm_dict['FORC_num_of_FORC_cycles'] * \
                            parm_dict['VS_number_of_cycles'] * \
                            parm_dict['VS_steps_per_full_cycle'] * \
                            parm_dict['BE_bins_per_band']
            if verbose:
                print('\t\tNumber of bins per step: calculated: {}, actual {}'
                      ''.format(bins_per_step, tot_bins))
            if bins_per_step > 0:
                if bins_per_step < tot_bins and tot_bins / bins_per_step % 1 == 0:
                    scale = int(tot_bins / bins_per_step)
                    warn('Total number of bins ({}) is {}X larger than the '
                         'calculated number of bins per step ({}). Will scale '
                         'the number of VS cycles to match'
                         ''.format(tot_bins, scale, bins_per_step))
                    parm_dict['VS_number_of_cycles'] *= scale
            else:
                if verbose:
                    print('\t\tUnable to calculate number of bins per step '
                          'since one or more parameters were 0')
        tot_bins = int(tot_bins) * tot_bins_multiplier

        if isBEPS:
            if self._verbose:
                print('\tBuilding UDVS table for BEPS')
            UDVS_labs, UDVS_units, UDVS_mat = self._build_udvs_table(parm_dict)

            if self._verbose:
                print('\tTrimming UDVS table to remove unused plot group columns')

            UDVS_mat, UDVS_labs, UDVS_units = trimUDVS(UDVS_mat, UDVS_labs, UDVS_units, ignored_plt_grps)

            old_spec_inds = np.zeros(shape=(2, tot_bins), dtype=INDICES_DTYPE)

            # Will assume that all excitation waveforms have same num of bins
            num_actual_udvs_steps = UDVS_mat.shape[0] / udvs_denom
            bins_per_step = tot_bins / num_actual_udvs_steps
            if self._verbose:
                print('\t# UDVS steps: {}, # bins/step: {}'
                      ''.format(num_actual_udvs_steps, bins_per_step))

            if bins_per_step % 1:
                print('UDVS mat shape: {}, total bins: {}, bins per step: {}'
                      ''.format(UDVS_mat.shape, tot_bins, bins_per_step))
                raise ValueError('Non-integer number of bins per step!')

            bins_per_step = int(bins_per_step)
            num_actual_udvs_steps = int(num_actual_udvs_steps)
            # Ensure that at least one UDVS step has a usable (non-zero) AC amplitude
            if not np.any(UDVS_mat[:, 2] > 1E-3):
                raise ValueError('No non-zero rows in AC amplitude')

            stind = 0
            for step_index in range(UDVS_mat.shape[0]):
                if UDVS_mat[step_index, 2] < 1E-3:  # invalid AC amplitude
                    continue
                # Bin step
                old_spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=INDICES_DTYPE)
                # UDVS step
                old_spec_inds[1, stind:stind + bins_per_step] = step_index * np.ones(bins_per_step, dtype=INDICES_DTYPE)
                stind += bins_per_step
            del stind, step_index
        else:  # BE Line
            if self._verbose:
                print('\tPreparing supporting variables for BE-Line')
            self.signal_type = 1
            self.expt_type = 1  # Stephen has not used this index for some reason
            num_actual_udvs_steps = 1
            bins_per_step = tot_bins
            UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
            UDVS_units = ['', 'V', 'A', '', '', '']
            UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
                                dtype=np.float32).reshape(1, len(UDVS_labs))
            old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
                                       np.zeros(tot_bins, dtype=INDICES_DTYPE)))
        if 'parm_mat' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from secondary parameters .mat file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm = self._read_parms_mat(path_dict['parm_mat'], isBEPS)
        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from old .mat parameters file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec = self._read_old_mat_be_vecs(path_dict['old_mat_parms'], verbose=verbose)
        else:
            warn('No secondary parameters file (.mat) provided. Generating '
                 'dummy BE arrays')
            band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
            st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
            en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
            bin_freqs = np.linspace(st_f, en_f, bins_per_step, dtype=np.float32)

            if verbose:
                print('\tGenerating BE arrays of length: '
                      '{}'.format(bins_per_step))
            bin_inds = np.zeros(shape=bins_per_step, dtype=np.int32)
            bin_FFT = np.zeros(shape=bins_per_step, dtype=np.complex64)
            ex_wfm = np.zeros(shape=bins_per_step, dtype=np.float32)

        # Forcing standardized datatypes:
        bin_inds = np.int32(bin_inds)
        bin_freqs = np.float32(bin_freqs)
        bin_FFT = np.complex64(bin_FFT)
        ex_wfm = np.float32(ex_wfm)

        self.FFT_BE_wave = bin_FFT

        # legacy parameters inserted for BEAM
        parm_dict['num_bins'] = tot_bins
        parm_dict['num_pix'] = num_pix
        parm_dict['num_udvs_steps'] = num_actual_udvs_steps
        parm_dict['num_steps'] = num_actual_udvs_steps
        if self._verbose:
            print('\tPreparing UDVS slices for region references')
        udvs_slices = dict()
        for col_ind, col_name in enumerate(UDVS_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))

        # Need to add the Bin Waveform type - infer from UDVS        
        exec_bin_vec = self.signal_type * np.ones(len(bin_inds), dtype=np.int32)

        if self.expt_type == 2:
            if self._verbose:
                print('\tExperiment type = 2. Doubling BE vectors')
            exec_bin_vec = np.hstack((exec_bin_vec, -1 * exec_bin_vec))
            bin_inds = np.hstack((bin_inds, bin_inds))
            bin_freqs = np.hstack((bin_freqs, bin_freqs))
            # This is wrong but I don't know what else to do
            bin_FFT = np.hstack((bin_FFT, bin_FFT))
        # Create Spectroscopic Values and Spectroscopic Values Labels datasets
        # This is an old, legacy way of doing things. Ideally, all we would need to do is just get the unit values
        if self._verbose:
            print('\tCalculating spectroscopic values')
        ret_vals = createSpecVals(UDVS_mat, old_spec_inds, bin_freqs,
                                  exec_bin_vec, parm_dict, UDVS_labs,
                                  UDVS_units, verbose=verbose)
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = ret_vals

        if self._verbose:
            print('\t\tspec_vals_labs: {}'.format(spec_vals_labs))
            unit_vals = get_unit_values(spec_inds, spec_vals,
                                        all_dim_names=spec_vals_labs,
                                        is_spec=True, verbose=False)
            print('\tUnit spectroscopic values')
            for key, val in unit_vals.items():
                print('\t\t{} : length: {}, values:\n\t\t\t{}'.format(key, len(val), val))

        if spec_inds.shape[1] != tot_bins:
            raise ValueError('Second axis of spectroscopic indices: {} not '
                             'matching with second axis of the expected main '
                             'dataset: {}'.format(spec_inds.shape, tot_bins))

        # Not sure what is happening here but this should work.
        spec_dim_dict = dict()
        for entry in spec_vals_labs_names:
            spec_dim_dict[entry[0] + '_parameters'] = entry[1]
        spec_vals_slices = dict()

        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind, row_ind + 1), slice(None))
        if path.exists(h5_path):
            if self._verbose:
                print('\tRemoving existing / old translated file: ' + h5_path)
            remove(h5_path)
        # First create the file
        h5_f = h5py.File(h5_path, mode='w')

        # Then write root level attributes
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        try:
            global_parms['experiment_date'] = parm_dict['File_date_and_time']
        except KeyError:
            global_parms['experiment_date'] = '1:1:1'
        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'ODF'
        if self._verbose:
            print('\tWriting attributes to HDF5 file root')
        write_simple_attrs(h5_f, global_parms)
        write_book_keeping_attrs(h5_f)

        # Then create the measurement group
        h5_meas_group = create_indexed_group(h5_f, 'Measurement')
        # Write attributes at the measurement group level
        if self._verbose:
            print('\twriting attributes to Measurement group')
        write_simple_attrs(h5_meas_group, parm_dict)
        # Create the Channel group
        h5_chan_grp = create_indexed_group(h5_meas_group, 'Channel')
        # Write channel group attributes
        write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1',
                                         'channel_type': 'BE'})
        # Now the datasets!
        if self._verbose:
            print('\tCreating ancillary datasets')
        h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)
        h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
        # TODO: Avoid using region references in USID
        write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=self._verbose)
        write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=False)
        h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])
        h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
                                   dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
        h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
        h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)
        if self._verbose:
            print('\tWriting Position datasets')

        pos_dims = [Dimension('X', 'm', np.arange(num_cols)),
                    Dimension('Y', 'm', np.arange(num_rows))]
        h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=self._verbose)
        if self._verbose:
            print('\tPosition datasets of shape: {}'.format(h5_pos_ind.shape))
        if self._verbose:
            print('\tWriting Spectroscopic datasets of shape: {}'.format(spec_inds.shape))
        h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=INDICES_DTYPE)        
        h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=np.array(spec_vals), dtype=VALUES_DTYPE)
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_region_references(dset, spec_vals_slices, add_labels_attr=True, verbose=self._verbose)
            write_simple_attrs(dset, {'units': spec_vals_units}, verbose=False)
            write_simple_attrs(dset, spec_dim_dict)

        # Noise floor should be of shape: (udvs_steps x 3 x positions)
        if self._verbose:
            print('\tWriting noise floor dataset')
        h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
                                   chunks=(1, num_actual_udvs_steps))

        """
        New Method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares
        of UDVS steps by pixels.  N is determined dynamically based on the dimensions of the
        dataset.  Currently it is set such that individual chunks are less than 10kB in size.

        Chris Smith -- csmith55@utk.edu
        """
        BEPS_chunks = calc_chunks([num_pix, tot_bins],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, bins_per_step))
        if self._verbose:
            print('\tHDF5 dataset will have chunks of size: {}'.format(BEPS_chunks))
            print('\tCreating empty main dataset of shape: ({}, {})'.format(num_pix, tot_bins))
        self.h5_raw = write_main_dataset(h5_chan_grp, (num_pix, tot_bins), 'Raw_Data', 'Piezoresponse', 'V', None, None,
                                         dtype=np.complex64, chunks=BEPS_chunks, compression='gzip',
                                         h5_pos_inds=h5_pos_ind, h5_pos_vals=h5_pos_val, h5_spec_inds=h5_spec_inds,
                                         h5_spec_vals=h5_spec_vals, verbose=self._verbose)

        if self._verbose:
            print('\tReading data from binary data files into raw HDF5')
        self._read_data(UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                        add_pix)

        if self._verbose:
            print('\tGenerating plot groups')
        generatePlotGroups(self.h5_raw, self.mean_resp, folder_path, basename,
                           self.max_resp, self.min_resp, max_mem_mb=self.max_ram,
                           spec_label=spec_label, show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=self._verbose)
        if self._verbose:
            print('\tUpgrading to USIDataset')
        self.h5_raw = USIDataset(self.h5_raw)

        # Go ahead and read the current data in the second (current) channel
        if current_data_exists:  # if a current-channel (AI2) .dat file was found
            if self._verbose:
                print('\tReading data in secondary channels (current)')
            self._read_secondary_channel(h5_meas_group, aux_files)
        if self._verbose:
            print('\tClosing HDF5 file')
        h5_f.close()
        return h5_path
    def _read_data(self, UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                   add_pix):
        """
        Checks if the data is BEPS or BELine and calls the correct function to read the data from
        file

        Parameters
        ----------
        UDVS_mat : numpy.ndarray of float
            UDVS table
        parm_dict : dict
            Experimental parameters
        path_dict : dict
            Dictionary of data files to be read
        real_size : int
            Size of each data file in bytes
        isBEPS : boolean
            Is the data BEPS
        add_pix : boolean
            Does the reader need to add extra pixels to the end of the dataset

        Returns
        -------
        None
        """
        # Now read the raw data files:
        if not isBEPS:
            # Do this for all BE-Line (always small enough to read in one shot)
            if self._verbose:
                print('\t\tReading all raw data for BE-Line in one shot')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'out-of-field':
            # Do this for out-of-field BEPS ONLY that is also small (256 MB)
            if self._verbose:
                print('\t\tReading all raw BEPS (out-of-field) data at once')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'in-field':
            # Do this for in-field only
            if self._verbose:
                print('\t\tReading all raw BEPS (in-field only) data at once')
            self._quick_read_data(path_dict['write_real'],
                                  path_dict['write_imag'],
                                  parm_dict['num_udvs_steps'])
        else:
            # Large BEPS datasets OR those with in-and-out of field
            if self._verbose:
                print('\t\tReading all raw data for in-and-out-of-field OR '
                      'very large file one pixel at a time')
            self._read_beps_data(path_dict, UDVS_mat.shape[0],
                                 parm_dict['VS_measure_in_field_loops'],
                                 add_pix)
        self.h5_raw.file.flush()
    def _read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
        """
        Reads the imaginary and real data files pixelwise and writes to the H5 file 
        
        Parameters 
        --------------------
        path_dict : dictionary
            Dictionary containing the absolute paths of the real and imaginary data files
        udvs_steps : unsigned int
            Number of UDVS steps
        mode : String / Unicode
            'in-field', 'out-of-field', or 'in and out-of-field'
        add_pixel : boolean. (Optional; default is False)
            If an empty pixel worth of data should be written to the end             
        
        Returns 
        -------------------- 
        None
        """
        print('---- reading pixel-by-pixel ----------')

        bytes_per_pix = self.h5_raw.shape[1] * 4
        step_size = self.h5_raw.shape[1] / udvs_steps
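        # Note: each of the real / imaginary .dat files stores one float32 (4 bytes)
        # per bin, so a single pixel occupies shape[1] * 4 bytes in each file.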

        if mode == 'out-of-field':
            parsers = [BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in-field':
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in and out-of-field':
            # each file will only have half the udvs steps:
            if 0.5 * udvs_steps % 1:
                raise ValueError('Odd number of UDVS steps')

            udvs_steps = int(0.5 * udvs_steps)
            # be careful - each pair contains only half the necessary bins - so read half
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2)),
                       BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2))]

            if step_size % 1:
                raise ValueError('Non-integer number of bins per UDVS step')

            step_size = int(step_size)
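        # Sample a random subset of spectra to decide (via requires_conjugate)
        # whether the raw data must be complex-conjugated before it is written.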
        rand_spectra = self._get_random_spectra(parsers, self.h5_raw.shape[0], udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
        self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
        self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)

        numpix = self.h5_raw.shape[0]
        """ 
        Don't try to do the last step if a pixel is missing.   
        This will be handled after the loop. 
        """
        if add_pixel:
            numpix -= 1

        for pix_indx in range(numpix):
            if self.h5_raw.shape[0] > 5:
                if pix_indx % int(round(self.h5_raw.shape[0] / 10)) == 0:
                    print('Reading... {}% complete'.format(round(100 * pix_indx / self.h5_raw.shape[0])))

            # get the raw stream from each parser
            pxl_data = list()
            for prsr in parsers:
                pxl_data.append(prsr.read_pixel())

            # interleave if both in and out of field
            # we are ignoring user defined possibilities...
            if mode == 'in and out-of-field':
                in_fld = pxl_data[0]
                out_fld = pxl_data[1]

                in_fld_2 = in_fld.reshape(udvs_steps, step_size)
                out_fld_2 = out_fld.reshape(udvs_steps, step_size)
                raw_mat = np.empty((udvs_steps * 2, step_size), dtype=out_fld.dtype)
                raw_mat[0::2, :] = in_fld_2
                raw_mat[1::2, :] = out_fld_2
                raw_vec = raw_mat.reshape(in_fld.size + out_fld.size).transpose()
            else:
                raw_vec = pxl_data[0]  # only one parser
            self.max_resp[pix_indx] = np.max(np.abs(raw_vec))
            self.min_resp[pix_indx] = np.min(np.abs(raw_vec))
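            # Incrementally update the running mean response over the pixels read so far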
            self.mean_resp = (1 / (pix_indx + 1)) * (raw_vec + pix_indx * self.mean_resp)

            if take_conjugate:
                raw_vec = np.conjugate(raw_vec)
            self.h5_raw[pix_indx, :] = np.complex64(raw_vec[:])
            self.h5_raw.file.flush()

        # Add zeros to main_data for the missing pixel. 
        if add_pixel:
            self.h5_raw[-1, :] = 0 + 0j

        print('---- Finished reading files -----')
    def _quick_read_data(self, real_path, imag_path, udvs_steps):
        """
        Reads the entire real and imaginary data files in one shot and writes the
        (conjugated, if necessary) complex data to the HDF5 dataset

        Parameters
        -----------
        real_path : String / Unicode
            Absolute file path of the real data file
        imag_path : String / Unicode
            Absolute file path of the imaginary data file
        udvs_steps : unsigned int
            Number of UDVS steps
        """
        parser = BEodfParser(real_path, imag_path, self.h5_raw.shape[0],
                             self.h5_raw.shape[1] * 4)
        step_size = self.h5_raw.shape[1] / udvs_steps
        rand_spectra = self._get_random_spectra([parser],
                                                self.h5_raw.shape[0],
                                                udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra,
                                                verbose=self._verbose)
        if self._verbose:
            print('\t\t\tChecking if conjugate is required')
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        raw_vec = parser.read_all_data()
        if take_conjugate:
            if self._verbose:
                print('\t'*4 + 'Taking conjugate for positive quality factors')
            raw_vec = np.conjugate(raw_vec)
        if raw_vec.shape[0] != np.prod(self.h5_raw.shape):
            percentage_padded = 100 * (np.prod(self.h5_raw.shape) - raw_vec.shape[0]) / np.prod(self.h5_raw.shape)
            warn('Warning! Raw data length {} does not match the expected length {}. '
                 'Padding zeros for {}% of the data!'
                 ''.format(raw_vec.shape[0], np.prod(self.h5_raw.shape), percentage_padded))

            padded_raw_vec = np.zeros(np.prod(self.h5_raw.shape), dtype=np.complex64)
            padded_raw_vec[:raw_vec.shape[0]] = raw_vec
            raw_mat = padded_raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])
        else:
            raw_mat = raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])

        # Write to the h5 dataset:
        self.mean_resp = np.mean(raw_mat, axis=0)
        self.max_resp = np.amax(np.abs(raw_mat), axis=0)
        self.min_resp = np.amin(np.abs(raw_mat), axis=0)
        self.h5_raw[:, :] = np.complex64(raw_mat)
        self.h5_raw.file.flush()
        print('---- Finished reading files -----')

    @staticmethod
    def _parse_file_path(data_filepath):
        """
        Returns the basename and a dictionary containing the absolute paths of the
        real and imaginary data files and the text / .mat parameter files
        
        Parameters 
        --------------------
        data_filepath: String / Unicode
            Absolute path of any file in the same directory as the .dat files
        
        Returns 
        --------------------
        basename : String / Unicode
            Basename of the dataset      
        path_dict : Dictionary
            Dictionary containing absolute paths of all necessary data and parameter files
        """
        (folder_path, basename) = path.split(data_filepath)
        (super_folder, basename) = path.split(folder_path)
        if basename.endswith('_d') or basename.endswith('_c'):
            # Very old data format where the folder ended with a _d or _c to denote a completed spectroscopic run
            basename = basename[:-2]
        """
        A single pair of real and imaginary files are / were generated for:
            BE-Line and BEPS (compiled version only generated out-of-field or 'read')
        Two pairs of real and imaginary files were generated for later BEPS datasets
            These have 'read' and 'write' prefixes to denote out or in field respectively
        """
        path_dict = dict()
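        # The loop below keys each '.dat' file by whether its name contains 'write'
        # (in-field) or not ('read', out-of-field), and 'real' vs 'imag' for the component.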

        for file_name in listdir(folder_path):
            abs_path = path.join(folder_path, file_name)
            if file_name.endswith('.txt') and file_name.find('parm') > 0:
                path_dict['parm_txt'] = abs_path
            elif file_name.find('.mat') > 0:
                if file_name.find('more_parms') > 0:
                    path_dict['parm_mat'] = abs_path
                elif file_name == (basename + '.mat'):
                    path_dict['old_mat_parms'] = abs_path
            elif file_name.endswith('.dat'):
                # Need to account for the second AI channel here
                file_tag = 'read'
                if file_name.find('write') > 0:
                    file_tag = 'write'
                if file_name.find('real') > 0:
                    file_tag += '_real'
                elif file_name.find('imag') > 0:
                    file_tag += '_imag'
                path_dict[file_tag] = abs_path

        return basename, path_dict
    def _read_secondary_channel(self, h5_meas_group, aux_file_path):
        """
        Reads secondary channel stored in AI .mat file
        Currently works for in-field measurements only, but should be updated to
        include both in and out of field measurements

        Parameters
        -----------
        h5_meas_group : h5 group
            Reference to the Measurement group
        aux_file_path : String / Unicode, or list of these
            Absolute file path(s) of the secondary channel file(s).
        """
        if self._verbose:
            print('\t---------- Reading Secondary Channel  ----------')
        if isinstance(aux_file_path, (list, tuple)):
            aux_file_paths = aux_file_path
        else:
            aux_file_paths = [aux_file_path]

        is_in_out_field = 'Field' in self.h5_raw.spec_dim_labels
        if not is_in_out_field and len(aux_file_paths) > 1:
            # TODO: Find a better way to handle this
            warn('\t\tField was not varied but found more than one file for '
                 'secondary channel: {}.\n\t\tResults will be overwritten'
                 ''.format([path.split(item)[-1] for item in aux_file_paths]))
        elif is_in_out_field and len(aux_file_paths) == 1:
            warn('\t\tField was varied but only one data file for the secondary '
                 'channel was found. Half the data will be zeros')

        spectral_len = 1
        for dim_name, dim_size in zip(self.h5_raw.spec_dim_labels,
                                      self.h5_raw.spec_dim_sizes):
            if dim_name == 'Frequency':