# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:12 2015

@author: Suhas Somnath, Stephen Jesse
"""

from __future__ import division, print_function, absolute_import, unicode_literals

from os import path, listdir, remove
import sys
import datetime
from warnings import warn
import h5py

import numpy as np
from scipy.io.matlab import loadmat  # To load parameters stored in Matlab .mat file

from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, \
    createSpecVals, requires_conjugate, generate_bipolar_triangular_waveform, \
    infer_bipolar_triangular_fraction_phase, nf32

from sidpy.hdf.hdf_utils import write_simple_attrs
from sidpy.hdf.reg_ref import write_region_references
from sidpy.sid import Translator
from sidpy.proc.comp_utils import get_available_memory
from pyUSID.io.write_utils import INDICES_DTYPE, VALUES_DTYPE, Dimension, calc_chunks
from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, \
    create_indexed_group, write_book_keeping_attrs, copy_attributes,\
    write_reduced_anc_dsets, get_unit_values
from pyUSID.io.usi_data import USIDataset

if sys.version_info.major == 3:
    unicode = str


class BEodfTranslator(Translator):
    """
    Translates either the Band Excitation (BE) scan or Band Excitation 
    Polarization Switching (BEPS) data format from the old data format(s) to .h5
    """
    def __init__(self, *args, **kwargs):
        super(BEodfTranslator, self).__init__(*args, **kwargs)
        self.h5_raw = None
        self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
        self._cores = kwargs.pop('cores', None)
        self.FFT_BE_wave = None
        self.signal_type = None
        self.expt_type = None
        self._verbose = False

    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator

        Parameters
        ----------
        data_path : str
            Path to raw data file

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """
        if not isinstance(data_path, (str, unicode)):
            raise TypeError('data_path must be a string')

        ndf = 'newdataformat'

        data_path = path.abspath(data_path)

        if path.isfile(data_path):
            ext = data_path.split('.')[-1]
            if ext.lower() not in ['jpg', 'png', 'jpeg', 'tiff', 'mat', 'txt',
                                   'dat', 'xls', 'xlsx']:
                return None

            # we only care about the folder names at this point...
            data_path, _ = path.split(data_path)

        # Check if the data is in the new or old format:
        # Check one level up:
        _, dir_name = path.split(data_path)
        if dir_name == ndf:
            # Though this translator could also read these files, the NDF Translator is more robust
            return None
        # Check one level down:
        if ndf in listdir(data_path):
            # Though this translator could also read these files, the NDF Translator is more robust
            return None

        file_path = path.join(data_path, listdir(path=data_path)[0])

        _, path_dict = BEodfTranslator._parse_file_path(file_path)

        if any([x.find('bigtime_0') > 0 and x.endswith('.dat') for x in
                path_dict.values()]):
            # This is a G-mode Line experiment:
            return None

        parm_found = any([piece in path_dict.keys() for piece in
                          ['parm_txt', 'old_mat_parms']])
        real_found = any([piece in path_dict.keys() for piece in
                          ['read_real', 'write_real']])
        imag_found = any([piece in path_dict.keys() for piece in
                          ['read_imag', 'write_imag']])

        if parm_found and real_found and imag_found:
            if 'parm_txt' in path_dict.keys():
                return path_dict['parm_txt']
            else:
                return path_dict['old_mat_parms']
        else:
            return None

122
123
    def translate(self, file_path, show_plots=True, save_plots=True,
                  do_histogram=False, verbose=False):
Somnath, Suhas's avatar
Somnath, Suhas committed
124
125
126
127
128
129
130
131
132
133
134
135
136
137
        """
        Translates .dat data file(s) to a single .h5 file
        
        Parameters
        -------------
        file_path : String / Unicode
            Absolute file path for one of the data files. 
            It is assumed that this file is of the OLD data format.
        show_plots : (optional) Boolean
            Whether or not to show intermediate plots
        save_plots : (optional) Boolean
            Whether or not to save plots to disk
        do_histogram : (optional) Boolean
            Whether or not to construct histograms to visualize data quality. Note - this takes a fair amount of time
138
139
        verbose : (optional) Boolean
            Whether or not to print statements
Somnath, Suhas's avatar
Somnath, Suhas committed
140
141
142
143
144
145
            
        Returns
        ----------
        h5_path : String / Unicode
            Absolute path of the resultant .h5 file
        """
ssomnath's avatar
ssomnath committed
146
147
        self._verbose = verbose

        file_path = path.abspath(file_path)
        (folder_path, basename) = path.split(file_path)
        (basename, path_dict) = self._parse_file_path(file_path)

        h5_path = path.join(folder_path, basename + '.h5')
        tot_bins_multiplier = 1
        udvs_denom = 2

        if 'parm_txt' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from text file')
            isBEPS, parm_dict = parmsToDict(path_dict['parm_txt'])

        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading parameters from old mat file')
            parm_dict = self._get_parms_from_old_mat(path_dict['old_mat_parms'], verbose=self._verbose)
            if parm_dict['VS_steps_per_full_cycle'] == 0:
                isBEPS = False
            else:
                isBEPS = True
        else:
            raise FileNotFoundError('No parameters file found! Cannot '
                                    'translate this dataset!')

        # Initial text files named some parameters differently:
        for case in [('VS_mode', 'AC modulation mode',
                      'AC modulation mode with time reversal'),
                     ('VS_mode', 'load Arbitrary VS Wave from text file',
                      'load user defined VS Wave from file'),
                     ('BE_phase_content', 'chirp', 'chirp-sinc hybrid'),]:
            key, wrong_val, corr_val = case
            if key not in parm_dict.keys():
                continue
            if parm_dict[key] == wrong_val:
                warn('Updating parameter "{}" from invalid value of "{}" to '
                     '"{}"'.format(key, wrong_val, corr_val))
                parm_dict[key] = corr_val

        # Some .mat files did not set correct values to some parameters:
        for case in [('BE_amplitude_[V]', 1E-2, 0.5151),
                     ('VS_amplitude_[V]', 1E-2, 0.9876)]:
            key, min_val, new_val = case
            if key not in parm_dict.keys():
                continue
            if parm_dict[key] < min_val:
                warn('Updating parameter "{}" from invalid value of {} to {}'
                     ''.format(key, parm_dict[key], new_val))
                parm_dict[key] = new_val

        if self._verbose:
            keys = list(parm_dict.keys())
            keys.sort()
            print('\tExperiment parameters:')
            for key in keys:
                print('\t\t{} : {}'.format(key, parm_dict[key]))

            print('\n\tisBEPS = {}'.format(isBEPS))

        ignored_plt_grps = []
        if isBEPS:
            parm_dict['data_type'] = 'BEPSData'

            field_mode = parm_dict['VS_measure_in_field_loops']
            std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'

            if not std_expt:
                raise ValueError('This translator does not handle user defined voltage spectroscopy')

            spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])

            if parm_dict['VS_mode'] in ['DC modulation mode', 'current mode']:
                if field_mode == 'in and out-of-field':
                    tot_bins_multiplier = 2
                    udvs_denom = 1
                else:
                    if field_mode == 'out-of-field':
                        ignored_plt_grps = ['in-field']
                    else:
                        ignored_plt_grps = ['out-of-field']
            else:
                tot_bins_multiplier = 1
                udvs_denom = 1

        else:
            spec_label = 'None'
            parm_dict['data_type'] = 'BELineData'

        # Check file sizes:
        if self._verbose:
            print('\tChecking sizes of real and imaginary data files')

        if 'read_real' in path_dict.keys():
            real_size = path.getsize(path_dict['read_real'])
            imag_size = path.getsize(path_dict['read_imag'])
        else:
            real_size = path.getsize(path_dict['write_real'])
            imag_size = path.getsize(path_dict['write_imag'])

        if real_size != imag_size:
            raise ValueError("Real and imaginary file sizes do not match!")

        if real_size == 0:
            raise ValueError('Real and imaginary files were empty')

        # Check here if a second channel for current is present
        # Look for the file containing the current data

        if self._verbose:
            print('\tLooking for secondary channels')
        file_names = listdir(folder_path)
        aux_files = []
        current_data_exists = False
        for fname in file_names:
            if 'AI2' in fname:
                if 'write' in fname:
                    current_file = path.join(folder_path, fname)
                    current_data_exists = True
                aux_files.append(path.join(folder_path, fname))

        add_pix = False
        num_rows = int(parm_dict['grid_num_rows'])
        num_cols = int(parm_dict['grid_num_cols'])
        if self._verbose:
            print('\tRows: {}, Cols: {}'.format(num_rows, num_cols))
        num_pix = num_rows * num_cols
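        # Each binary file stores one float32 (4 bytes) per bin per pixel,
        # hence the division by 4 below when recovering the number of bins.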
        tot_bins = real_size / (num_pix * 4)
        # Check for the case where only a single pixel is missing.
        if num_pix == 1:
            check_bins = real_size / (num_pix * 4)
        else:
            check_bins = real_size / ((num_pix - 1) * 4)

        if self._verbose:
            print('\tChecking bins: Total: {}, actual: {}'.format(tot_bins,
                                                                  check_bins))

        if tot_bins % 1 and check_bins % 1:
            raise ValueError('Aborting! Some parameter appears to have '
                             'changed in-between')
        elif not tot_bins % 1:
            # Everything's ok
            pass
        elif not check_bins % 1:
            tot_bins = check_bins
            warn('Warning: A pixel seems to be missing from the data. '
                 'File will be padded with zeros.')
            add_pix = True

        # This would crash and fail later if not fixed here
        # I don't like this hacky approach to solve this problem
        if isBEPS and tot_bins % 1 == 0 and parm_dict['VS_mode'] != 'Custom':
            bins_per_step = parm_dict['FORC_num_of_FORC_cycles'] * \
                            parm_dict['VS_number_of_cycles'] * \
                            parm_dict['VS_steps_per_full_cycle'] * \
                            parm_dict['BE_bins_per_band']
            if verbose:
                print('\t\tNumber of bins per step: calculated: {}, actual {}'
                      ''.format(bins_per_step, tot_bins))
            if bins_per_step > 0:
                if bins_per_step < tot_bins and tot_bins / bins_per_step % 1 == 0:
                    scale = int(tot_bins / bins_per_step)
                    warn('Actual number of bins ({}) is {}X larger than the '
                         'calculated number of bins per step ({}). Will scale '
                         'the number of VS cycles so that the bin counts match'
                         ''.format(tot_bins, scale, bins_per_step))
                    parm_dict['VS_number_of_cycles'] *= scale
            else:
                if verbose:
                    print('\t\tUnable to calculate number of bins per step '
                          'since one or more parameters were 0')

        tot_bins = int(tot_bins) * tot_bins_multiplier

        if isBEPS:
            if self._verbose:
                print('\tBuilding UDVS table for BEPS')
            UDVS_labs, UDVS_units, UDVS_mat = self._build_udvs_table(parm_dict)

            if self._verbose:
                print('\tTrimming UDVS table to remove unused plot group columns')

            UDVS_mat, UDVS_labs, UDVS_units = trimUDVS(UDVS_mat, UDVS_labs, UDVS_units, ignored_plt_grps)

            old_spec_inds = np.zeros(shape=(2, tot_bins), dtype=INDICES_DTYPE)

            # Will assume that all excitation waveforms have same num of bins
            num_actual_udvs_steps = UDVS_mat.shape[0] / udvs_denom
            bins_per_step = tot_bins / num_actual_udvs_steps
            if self._verbose:
                print('\t# UDVS steps: {}, # bins/step: {}'
                      ''.format(num_actual_udvs_steps, bins_per_step))

            if bins_per_step % 1:
                print('UDVS mat shape: {}, total bins: {}, bins per step: {}'.format(UDVS_mat.shape, tot_bins,
                                                                                     bins_per_step))
                raise ValueError('Non integer number of bins per step!')

            bins_per_step = int(bins_per_step)
            num_actual_udvs_steps = int(num_actual_udvs_steps)

            if len(np.unique(UDVS_mat[:, 2])) == 0:
                raise ValueError('No non-zero rows in AC amplitude')

            stind = 0
            for step_index in range(UDVS_mat.shape[0]):
                if UDVS_mat[step_index, 2] < 1E-3:  # invalid AC amplitude
                    continue
                # Bin step
                old_spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=INDICES_DTYPE)
                # UDVS step
                old_spec_inds[1, stind:stind + bins_per_step] = step_index * np.ones(bins_per_step, dtype=INDICES_DTYPE)
                stind += bins_per_step
            del stind, step_index

        else:  # BE Line
            if self._verbose:
                print('\tPreparing supporting variables since BE-Line')
            self.signal_type = 1
            self.expt_type = 1  # Stephen has not used this index for some reason
            num_actual_udvs_steps = 1
            bins_per_step = tot_bins
            UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
            UDVS_units = ['', 'V', 'A', '', '', '']
            UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
                                dtype=np.float32).reshape(1, len(UDVS_labs))

            old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
                                       np.zeros(tot_bins, dtype=INDICES_DTYPE)))

        if 'parm_mat' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from parameters .mat file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm = self._read_parms_mat(path_dict['parm_mat'], isBEPS)
        elif 'old_mat_parms' in path_dict.keys():
            if self._verbose:
                print('\treading BE arrays from old .mat parameters file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec = self._read_old_mat_be_vecs(path_dict['old_mat_parms'], verbose=verbose)
        else:
            warn('No secondary parameters file (.mat) provided. Generating '
                 'dummy BE arrays')
            band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
            st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
            en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
            bin_freqs = np.linspace(st_f, en_f, bins_per_step, dtype=np.float32)

            if verbose:
                print('\tGenerating BE arrays of length: '
                      '{}'.format(bins_per_step))
            bin_inds = np.zeros(shape=bins_per_step, dtype=np.int32)
            bin_FFT = np.zeros(shape=bins_per_step, dtype=np.complex64)
            ex_wfm = np.zeros(shape=bins_per_step, dtype=np.float32)

        # Forcing standardized datatypes:
        bin_inds = np.int32(bin_inds)
        bin_freqs = np.float32(bin_freqs)
        bin_FFT = np.complex64(bin_FFT)
        ex_wfm = np.float32(ex_wfm)

        self.FFT_BE_wave = bin_FFT

        # legacy parameters inserted for BEAM
        parm_dict['num_bins'] = tot_bins
        parm_dict['num_pix'] = num_pix
        parm_dict['num_udvs_steps'] = num_actual_udvs_steps
        parm_dict['num_steps'] = num_actual_udvs_steps

        if self._verbose:
            print('\tPreparing UDVS slices for region references')
        udvs_slices = dict()
        for col_ind, col_name in enumerate(UDVS_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))

        # Need to add the Bin Waveform type - infer from UDVS
        exec_bin_vec = self.signal_type * np.ones(len(bin_inds), dtype=np.int32)

        if self.expt_type == 2:
            if self._verbose:
                print('\tExperiment type = 2. Doubling BE vectors')
            exec_bin_vec = np.hstack((exec_bin_vec, -1 * exec_bin_vec))
            bin_inds = np.hstack((bin_inds, bin_inds))
            bin_freqs = np.hstack((bin_freqs, bin_freqs))
            # This is wrong but I don't know what else to do
            bin_FFT = np.hstack((bin_FFT, bin_FFT))

        # Create Spectroscopic Values and Spectroscopic Values Labels datasets
        # This is an old and legacy way of doing things. Ideally, all we would need to do is just get the unit values
        if self._verbose:
            print('\tCalculating spectroscopic values')
        ret_vals = createSpecVals(UDVS_mat, old_spec_inds, bin_freqs,
                                  exec_bin_vec, parm_dict, UDVS_labs,
                                  UDVS_units, verbose=verbose)
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = ret_vals

        if self._verbose:
            print('\t\tspec_vals_labs: {}'.format(spec_vals_labs))
            unit_vals = get_unit_values(spec_inds, spec_vals,
                                        all_dim_names=spec_vals_labs,
                                        is_spec=True, verbose=False)
            print('\tUnit spectroscopic values')
            for key, val in unit_vals.items():
                print('\t\t{} : length: {}, values:\n\t\t\t{}'.format(key, len(val), val))

        if spec_inds.shape[1] != tot_bins:
            raise ValueError('Second axis of spectroscopic indices: {} not '
                             'matching with second axis of the expected main '
                             'dataset: {}'.format(spec_inds.shape, tot_bins))

        # Build a dictionary of per-dimension parameters for the spectroscopic dimensions
        spec_dim_dict = dict()
        for entry in spec_vals_labs_names:
            spec_dim_dict[entry[0] + '_parameters'] = entry[1]

        spec_vals_slices = dict()

        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind, row_ind + 1), slice(None))

        if path.exists(h5_path):
            if self._verbose:
                print('\tRemoving existing / old translated file: ' + h5_path)
            remove(h5_path)

        # First create the file
        h5_f = h5py.File(h5_path, mode='w')

        # Then write root level attributes
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        try:
            global_parms['experiment_date'] = parm_dict['File_date_and_time']
        except KeyError:
            global_parms['experiment_date'] = '1:1:1'

        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'ODF'
        if self._verbose:
            print('\tWriting attributes to HDF5 file root')
        write_simple_attrs(h5_f, global_parms)
        write_book_keeping_attrs(h5_f)

        # Then create the measurement group
        h5_meas_group = create_indexed_group(h5_f, 'Measurement')

        # Write attributes at the measurement group level
        if self._verbose:
            print('\twriting attributes to Measurement group')
        write_simple_attrs(h5_meas_group, parm_dict)

        # Create the Channel group
        h5_chan_grp = create_indexed_group(h5_meas_group, 'Channel')

        # Write channel group attributes
        write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1',
                                         'channel_type': 'BE'})

        # Now the datasets!
        if self._verbose:
            print('\tCreating ancillary datasets')
        h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)

        h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
        # TODO: Avoid using region references in USID
        write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=self._verbose)
        write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=False)

        h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])

        h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
                                   dtype=INDICES_DTYPE)

        h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
        h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
        h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)

        if self._verbose:
            print('\tWriting Position datasets')

        pos_dims = [Dimension('X', 'm', np.arange(num_cols)),
                    Dimension('Y', 'm', np.arange(num_rows))]
        h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=self._verbose)
        if self._verbose:
            print('\tPosition datasets of shape: {}'.format(h5_pos_ind.shape))

        if self._verbose:
            print('\tWriting Spectroscopic datasets of shape: {}'.format(spec_inds.shape))
        h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=INDICES_DTYPE)
        h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=np.array(spec_vals), dtype=VALUES_DTYPE)
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_region_references(dset, spec_vals_slices, add_labels_attr=True, verbose=self._verbose)
            write_simple_attrs(dset, {'units': spec_vals_units}, verbose=False)
            write_simple_attrs(dset, spec_dim_dict)

        # Noise floor should be of shape: (udvs_steps x 3 x positions)
        if self._verbose:
            print('\tWriting noise floor dataset')
        h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
                                   chunks=(1, num_actual_udvs_steps))

        """
        New method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares
        of UDVS steps by pixels.  N is determined dynamically based on the dimensions of the
        dataset.  Currently it is set such that individual chunks are less than 10 kB in size.

        Chris Smith -- csmith55@utk.edu
        """
        BEPS_chunks = calc_chunks([num_pix, tot_bins],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, bins_per_step))
        if self._verbose:
            print('\tHDF5 dataset will have chunks of size: {}'.format(BEPS_chunks))
            print('\tCreating empty main dataset of shape: ({}, {})'.format(num_pix, tot_bins))
        self.h5_raw = write_main_dataset(h5_chan_grp, (num_pix, tot_bins), 'Raw_Data', 'Piezoresponse', 'V', None, None,
                                         dtype=np.complex64, chunks=BEPS_chunks, compression='gzip',
                                         h5_pos_inds=h5_pos_ind, h5_pos_vals=h5_pos_val, h5_spec_inds=h5_spec_inds,
                                         h5_spec_vals=h5_spec_vals, verbose=self._verbose)

        if self._verbose:
            print('\tReading data from binary data files into raw HDF5')
        self._read_data(UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                        add_pix)

        if self._verbose:
            print('\tGenerating plot groups')
        generatePlotGroups(self.h5_raw, self.mean_resp, folder_path, basename,
                           self.max_resp, self.min_resp, max_mem_mb=self.max_ram,
                           spec_label=spec_label, show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=self._verbose)
        if self._verbose:
            print('\tUpgrading to USIDataset')
        self.h5_raw = USIDataset(self.h5_raw)

        # Go ahead and read the current data in the second (current) channel
        if current_data_exists:  # If a .dat file matches
            if self._verbose:
                print('\tReading data in secondary channels (current)')
            self._read_secondary_channel(h5_meas_group, aux_files)

        if self._verbose:
            print('\tClosing HDF5 file')
        h5_f.close()

        return h5_path

    def _read_data(self, UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                   add_pix):
        """
        Checks if the data is BEPS or BELine and calls the correct function to read the data from
        file

        Parameters
        ----------
        UDVS_mat : numpy.ndarray of float
            UDVS table
        parm_dict : dict
            Experimental parameters
        path_dict : dict
            Dictionary of data files to be read
        real_size : int
            Size of each data file in bytes
        isBEPS : boolean
            Is the data BEPS
        add_pix : boolean
            Does the reader need to add extra pixels to the end of the dataset

        Returns
        -------
        None
        """
        # Now read the raw data files:
        if not isBEPS:
            # Do this for all BE-Line (always small enough to read in one shot)
            if self._verbose:
                print('\t\tReading all raw data for BE-Line in one shot')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'out-of-field':
            # Do this for out-of-field BEPS ONLY that is also small (256 MB)
            if self._verbose:
                print('\t\tReading all raw BEPS (out-of-field) data at once')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'])
        elif real_size < self.max_ram and \
                parm_dict['VS_measure_in_field_loops'] == 'in-field':
            # Do this for in-field only
            if self._verbose:
                print('\t\tReading all raw BEPS (in-field only) data at once')
            self._quick_read_data(path_dict['write_real'],
                                  path_dict['write_imag'],
                                  parm_dict['num_udvs_steps'])
        else:
            # Large BEPS datasets OR those with in-and-out of field
            if self._verbose:
                print('\t\tReading all raw data for in-and-out-of-field OR '
                      'very large file one pixel at a time')
            self._read_beps_data(path_dict, UDVS_mat.shape[0],
                                 parm_dict['VS_measure_in_field_loops'],
                                 add_pix)
        self.h5_raw.file.flush()

    def _read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
        """
        Reads the imaginary and real data files pixelwise and writes to the H5 file 
        
        Parameters 
        --------------------
        path_dict : dictionary
            Dictionary containing the absolute paths of the real and imaginary data files
        udvs_steps : unsigned int
            Number of UDVS steps
        mode : String / Unicode
            'in-field', 'out-of-field', or 'in and out-of-field'
        add_pixel : boolean. (Optional; default is False)
            If an empty pixel worth of data should be written to the end             
        
        Returns 
        -------------------- 
        None
        """

        print('---- reading pixel-by-pixel ----------')

        bytes_per_pix = self.h5_raw.shape[1] * 4
        step_size = self.h5_raw.shape[1] / udvs_steps

        if mode == 'out-of-field':
            parsers = [BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in-field':
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in and out-of-field':
            # each file will only have half the udvs steps:
            if 0.5 * udvs_steps % 1:
                raise ValueError('Odd number of UDVS')

            udvs_steps = int(0.5 * udvs_steps)
            # be careful - each pair contains only half the necessary bins - so read half
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2)),
                       BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2))]

            if step_size % 1:
                raise ValueError('strange number of bins per UDVS step. Exiting')

            step_size = int(step_size)

        rand_spectra = self._get_random_spectra(parsers, self.h5_raw.shape[0], udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)

        self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
        self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
        self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)

        numpix = self.h5_raw.shape[0]
        """
        Don't try to do the last step if a pixel is missing.
        This will be handled after the loop.
        """
        if add_pixel:
            numpix -= 1

        for pix_indx in range(numpix):
            if self.h5_raw.shape[0] > 5:
                if pix_indx % int(round(self.h5_raw.shape[0] / 10)) == 0:
                    print('Reading... {}% complete'.format(round(100 * pix_indx / self.h5_raw.shape[0])))

            # get the raw stream from each parser
            pxl_data = list()
            for prsr in parsers:
                pxl_data.append(prsr.read_pixel())

            # interleave if both in and out of field
            # we are ignoring user defined possibilities...
            if mode == 'in and out-of-field':
                in_fld = pxl_data[0]
                out_fld = pxl_data[1]

                in_fld_2 = in_fld.reshape(udvs_steps, step_size)
                out_fld_2 = out_fld.reshape(udvs_steps, step_size)
                raw_mat = np.empty((udvs_steps * 2, step_size), dtype=out_fld.dtype)
                raw_mat[0::2, :] = in_fld_2
                raw_mat[1::2, :] = out_fld_2
                raw_vec = raw_mat.reshape(in_fld.size + out_fld.size).transpose()
            else:
                raw_vec = pxl_data[0]  # only one parser
            self.max_resp[pix_indx] = np.max(np.abs(raw_vec))
            self.min_resp[pix_indx] = np.min(np.abs(raw_vec))
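            # Running (incremental) mean over the pixels read so far:
            # new_mean = (raw_vec + pix_indx * old_mean) / (pix_indx + 1)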
            self.mean_resp = (1 / (pix_indx + 1)) * (raw_vec + pix_indx * self.mean_resp)
747
748
749

            if take_conjugate:
                raw_vec = np.conjugate(raw_vec)
750
            self.h5_raw[pix_indx, :] = np.complex64(raw_vec[:])
751
            self.h5_raw.file.flush()
Unknown's avatar
Unknown committed
752

Somnath, Suhas's avatar
Somnath, Suhas committed
753
        # Add zeros to main_data for the missing pixel. 
Unknown's avatar
Unknown committed
754
755
756
        if add_pixel:
            self.h5_raw[-1, :] = 0 + 0j

Somnath, Suhas's avatar
Somnath, Suhas committed
757
        print('---- Finished reading files -----')
758

    def _quick_read_data(self, real_path, imag_path, udvs_steps):
        """
        Reads the real and imaginary data files in one shot and writes the
        data to the main HDF5 dataset

        Parameters
        -----------
        real_path : String / Unicode
            Absolute file path of the real data file
        imag_path : String / Unicode
            Absolute file path of the imaginary data file
        udvs_steps : unsigned int
            Number of UDVS steps
        """
        parser = BEodfParser(real_path, imag_path, self.h5_raw.shape[0],
                             self.h5_raw.shape[1] * 4)

        step_size = self.h5_raw.shape[1] / udvs_steps
        rand_spectra = self._get_random_spectra([parser],
                                                self.h5_raw.shape[0],
                                                udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra,
                                                verbose=self._verbose)
        if self._verbose:
            print('\t\t\tChecking if conjugate is required')
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        raw_vec = parser.read_all_data()
        if take_conjugate:
            if self._verbose:
                print('\t' * 4 + 'Taking conjugate for positive quality factors')
            raw_vec = np.conjugate(raw_vec)

        if raw_vec.shape[0] != np.prod(self.h5_raw.shape):
            percentage_padded = 100 * (np.prod(self.h5_raw.shape) - raw_vec.shape[0]) / np.prod(self.h5_raw.shape)
            warn('Warning! Raw data length {} does not match the expected '
                 'length {}. Padding zeros for {}% of the data!'
                 ''.format(raw_vec.shape[0], np.prod(self.h5_raw.shape), percentage_padded))

            padded_raw_vec = np.zeros(np.prod(self.h5_raw.shape), dtype=np.complex64)

            padded_raw_vec[:raw_vec.shape[0]] = raw_vec
            raw_mat = padded_raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])
        else:
            raw_mat = raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])

        # Write to the h5 dataset:
        self.mean_resp = np.mean(raw_mat, axis=0)
        self.max_resp = np.amax(np.abs(raw_mat), axis=0)
        self.min_resp = np.amin(np.abs(raw_mat), axis=0)
        self.h5_raw[:, :] = np.complex64(raw_mat)
        self.h5_raw.file.flush()

        print('---- Finished reading files -----')

    @staticmethod
    def _parse_file_path(data_filepath):
        """
        Returns the basename and a dictionary containing the absolute file paths for the
        real and imaginary data files, as well as the text and .mat parameter files

        Parameters
        --------------------
        data_filepath : String / Unicode
            Absolute path of any file in the same directory as the .dat files

        Returns
        --------------------
        basename : String / Unicode
            Basename of the dataset
        path_dict : Dictionary
            Dictionary containing absolute paths of all necessary data and parameter files
        """
        (folder_path, basename) = path.split(data_filepath)
        (super_folder, basename) = path.split(folder_path)

        if basename.endswith('_d') or basename.endswith('_c'):
            # Old old data format where the folder ended with a _d or _c to denote a completed spectroscopic run
            basename = basename[:-2]
        """
        A single pair of real and imaginary files are / were generated for:
            BE-Line and BEPS (compiled version only generated out-of-field or 'read')
        Two pairs of real and imaginary files were generated for later BEPS datasets
            These have 'read' and 'write' prefixes to denote out or in field respectively
        """
        path_dict = dict()

        for file_name in listdir(folder_path):
            abs_path = path.join(folder_path, file_name)
            if file_name.endswith('.txt') and file_name.find('parm') > 0:
                path_dict['parm_txt'] = abs_path
            elif file_name.find('.mat') > 0:
                if file_name.find('more_parms') > 0:
                    path_dict['parm_mat'] = abs_path
                elif file_name == (basename + '.mat'):
                    path_dict['old_mat_parms'] = abs_path
            elif file_name.endswith('.dat'):
                # Need to account for the second AI channel here
                file_tag = 'read'
                if file_name.find('write') > 0:
                    file_tag = 'write'
                if file_name.find('real') > 0:
                    file_tag += '_real'
                elif file_name.find('imag') > 0:
                    file_tag += '_imag'
                path_dict[file_tag] = abs_path

        return basename, path_dict

    def _read_secondary_channel(self, h5_meas_group, aux_file_path):
        """
        Reads secondary channel stored in AI .mat file
        Currently works for in-field measurements only, but should be updated to
        include both in and out of field measurements

        Parameters
        -----------
        h5_meas_group : h5 group
            Reference to the Measurement group
        aux_file_path : String / Unicode, or list of these
            Absolute file path(s) of the secondary channel file(s)
        """
        if self._verbose:
            print('\t---------- Reading Secondary Channel ----------')
        if isinstance(aux_file_path, (list, tuple)):
            aux_file_paths = aux_file_path
        else:
            aux_file_paths = [aux_file_path]

        is_in_out_field = 'Field' in self.h5_raw.spec_dim_labels

        if not is_in_out_field and len(aux_file_paths) > 1:
            # TODO: Find a better way to handle this
            warn('\t\tField was not varied but found more than one file for '
                 'secondary channel: {}.\n\t\tResults will be overwritten'
                 ''.format([path.split(item)[-1] for item in aux_file_paths]))
        elif is_in_out_field and len(aux_file_paths) == 1:
            warn('\t\tField was varied but only one data file for the secondary '
                 'channel was found. Half the data will be zeros')

        spectral_len = 1
        for dim_name, dim_size in zip(self.h5_raw.spec_dim_labels,
                                      self.h5_raw.spec_dim_sizes):
            if dim_name == 'Frequency':
                continue
            spectral_len = spectral_len * dim_size