be_odf.py 71.2 KB
Newer Older
Somnath, Suhas's avatar
Somnath, Suhas committed
1
2
3
4
5
6
7
# -*- coding: utf-8 -*-
"""
Created on Tue Nov  3 15:24:12 2015

@author: Suhas Somnath, Stephen Jesse
"""

8
from __future__ import division, print_function, absolute_import, unicode_literals
9

Somnath, Suhas's avatar
Somnath, Suhas committed
10
from os import path, listdir, remove
11
import sys
12
import datetime
13
from warnings import warn
14
import h5py
Somnath, Suhas's avatar
Somnath, Suhas committed
15
16
import numpy as np
from scipy.io.matlab import loadmat  # To load parameters stored in Matlab .mat file
17

18
from .df_utils.be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, \
19
20
    createSpecVals, requires_conjugate, generate_bipolar_triangular_waveform, \
    infer_bipolar_triangular_fraction_phase, nf32
21
from pyUSID.io.translator import Translator
22
23
from pyUSID.io.write_utils import INDICES_DTYPE, VALUES_DTYPE, Dimension, calc_chunks
from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, write_region_references, \
24
    create_indexed_group, write_simple_attrs, write_book_keeping_attrs, copy_attributes,\
25
    write_reduced_anc_dsets, get_unit_values
26
from pyUSID.io.usi_data import USIDataset
27
from pyUSID.processing.comp_utils import get_available_memory
28

29
30
31
if sys.version_info.major == 3:
    unicode = str

32

Somnath, Suhas's avatar
Somnath, Suhas committed
33
34
35
36
37
class BEodfTranslator(Translator):
    """
    Translates either the Band Excitation (BE) scan or Band Excitation 
    Polarization Switching (BEPS) data format from the old data format(s) to .h5
    """
Unknown's avatar
Unknown committed
38

Chris Smith's avatar
Chris Smith committed
39
40
41
    def __init__(self, *args, **kwargs):
        """
        Initializes the BEodfTranslator.

        Parameters
        ----------
        num_rand_spectra : int, optional (keyword only). Default = 1000
            Number of random spectra — presumably used when computing
            statistics / plot groups; TODO confirm against downstream use.
        cores : int, optional (keyword only). Default = None
            Number of CPU cores to use — stored on ``self._cores``;
            consumers not visible in this chunk.

        Remaining positional / keyword arguments are forwarded to the
        parent ``Translator``.
        """
        # Pop translator-specific kwargs BEFORE delegating to the base class,
        # otherwise super().__init__ receives keyword arguments it does not
        # declare and may raise TypeError.
        self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
        self._cores = kwargs.pop('cores', None)
        super(BEodfTranslator, self).__init__(*args, **kwargs)
        # Handle to the main Raw_Data dataset, populated during translate()
        self.h5_raw = None
        # FFT of the BE excitation waveform, set in translate()
        self.FFT_BE_wave = None
        # Signal / experiment type codes (set for BE-Line in translate())
        self.signal_type = None
        self.expt_type = None
Chris Smith's avatar
Chris Smith committed
47

48
    @staticmethod
    def is_valid_file(data_path):
        """
        Checks whether the provided file can be read by this translator.

        Parameters
        ----------
        data_path : str
            Path to raw data file

        Returns
        -------
        obj : str
            Path to file that will be accepted by the translate() function if
            this translator is indeed capable of translating the provided file.
            Otherwise, None will be returned
        """
        if not isinstance(data_path, (str, unicode)):
            raise TypeError('data_path must be a string')

        ndf = 'newdataformat'

        data_path = path.abspath(data_path)

        if path.isfile(data_path):
            ext = data_path.split('.')[-1]
            if ext.lower() not in ['jpg', 'png', 'jpeg', 'tiff', 'mat', 'txt',
                                   'dat', 'xls', 'xlsx']:
                return None
            # we only care about the folder names at this point...
            data_path, _ = path.split(data_path)

        # Check if the data is in the new or old format:
        # Check one level up:
        _, dir_name = path.split(data_path)
        if dir_name == ndf:
            # Though this translator could also read the files but the NDF Translator is more robust...
            return None
        # Check one level down (list the folder once and reuse the result):
        folder_contents = listdir(data_path)
        if ndf in folder_contents:
            # Though this translator could also read the files but the NDF Translator is more robust...
            return None

        # An empty folder clearly has nothing this translator can read:
        if not folder_contents:
            return None

        file_path = path.join(data_path, folder_contents[0])

        _, path_dict = BEodfTranslator._parse_file_path(file_path)

        # A 'bigtime' .dat file signals a G-mode Line experiment, which this
        # translator does not handle. (The original code performed this exact
        # check twice in a row; the redundant copy has been removed.)
        if any(x.find('bigtime_0') > 0 and x.endswith('.dat')
               for x in path_dict.values()):
            return None

        parm_found = any(piece in path_dict.keys() for piece in
                         ['parm_txt', 'old_mat_parms'])
        real_found = any(piece in path_dict.keys() for piece in
                         ['read_real', 'write_real'])
        imag_found = any(piece in path_dict.keys() for piece in
                         ['read_imag', 'write_imag'])

        if parm_found and real_found and imag_found:
            # Prefer the text parameter file when both kinds are present:
            if 'parm_txt' in path_dict.keys():
                return path_dict['parm_txt']
            return path_dict['old_mat_parms']
        return None
117

118
119
    def translate(self, file_path, show_plots=True, save_plots=True,
                  do_histogram=False, verbose=False):
        """
        Translates .dat data file(s) to a single .h5 file
        
        Parameters
        -------------
        file_path : String / Unicode
            Absolute file path for one of the data files. 
            It is assumed that this file is of the OLD data format.
        show_plots : (optional) Boolean
            Whether or not to show intermediate plots
        save_plots : (optional) Boolean
            Whether or not to save plots to disk
        do_histogram : (optional) Boolean
            Whether or not to construct histograms to visualize data quality. Note - this takes a fair amount of time
        verbose : (optional) Boolean
            Whether or not to print statements
            
        Returns
        ----------
        h5_path : String / Unicode
            Absolute path of the resultant .h5 file
        """
        # ---- Step 1: locate all sibling data files and decide the output path
        file_path = path.abspath(file_path)
        (folder_path, basename) = path.split(file_path)
        (basename, path_dict) = self._parse_file_path(file_path)

        h5_path = path.join(folder_path, basename + '.h5')
        tot_bins_multiplier = 1
        udvs_denom = 2

        # ---- Step 2: read experiment parameters (text file preferred over
        # the legacy .mat file) and figure out whether this is BEPS or BE-Line
        if 'parm_txt' in path_dict.keys():
            if verbose:
                print('\treading parameters from text file')
            (isBEPS, parm_dict) = parmsToDict(path_dict['parm_txt'])
        elif 'old_mat_parms' in path_dict.keys():
            if verbose:
                print('\treading parameters from old mat file')
            parm_dict = self._get_parms_from_old_mat(path_dict['old_mat_parms'], verbose=verbose)
            # Zero VS steps per cycle is used as the marker for BE-Line data
            if parm_dict['VS_steps_per_full_cycle'] == 0:
                isBEPS=False
            else:
                isBEPS=True
        else:
            raise FileNotFoundError('No parameters file found! Cannot translate this dataset!')

        if verbose:
            keys = list(parm_dict.keys())
            keys.sort()
            print('\tExperiment parameters:')
            for key in keys:
                print('\t\t{} : {}'.format(key, parm_dict[key]))

            print('\n\tisBEPS = {}'.format(isBEPS))

        # ---- Step 3: work out field mode, bin multipliers and which plot
        # groups (in-field / out-of-field) should be dropped later
        ignored_plt_grps = []
        if isBEPS:
            parm_dict['data_type'] = 'BEPSData'

            field_mode = parm_dict['VS_measure_in_field_loops']
            std_expt = parm_dict['VS_mode'] != 'load user defined VS Wave from file'

            if not std_expt:
                raise ValueError('This translator does not handle user defined voltage spectroscopy')

            spec_label = getSpectroscopicParmLabel(parm_dict['VS_mode'])

            if parm_dict['VS_mode'] in ['DC modulation mode', 'current mode']:
                # in-and-out-of-field doubles the number of bins per position
                if field_mode == 'in and out-of-field':
                    tot_bins_multiplier = 2
                    udvs_denom = 1
                else:
                    if field_mode == 'out-of-field':
                        ignored_plt_grps = ['in-field']
                    else:
                        ignored_plt_grps = ['out-of-field']
            else:
                tot_bins_multiplier = 1
                udvs_denom = 1

        else:
            spec_label = 'None'
            parm_dict['data_type'] = 'BELineData'

        # Check file sizes:
        if verbose:
            print('\tChecking sizes of real and imaginary data files')

        if 'read_real' in path_dict.keys():
            real_size = path.getsize(path_dict['read_real'])
            imag_size = path.getsize(path_dict['read_imag'])
        else:
            real_size = path.getsize(path_dict['write_real'])
            imag_size = path.getsize(path_dict['write_imag'])

        if real_size != imag_size:
            raise ValueError("Real and imaginary file sizes DON'T match!. Ending")

        # Check here if a second channel for current is present
        # Look for the file containing the current data

        if verbose:
            print('\tLooking for secondary channels')
        file_names = listdir(folder_path)
        aux_files = []
        current_data_exists = False
        for fname in file_names:
            # 'AI2' in the name marks the second analog-input (current) channel
            if 'AI2' in fname:
                if 'write' in fname:
                    # NOTE(review): current_file is assigned but not used below;
                    # only the current_data_exists flag and aux_files matter here
                    current_file = path.join(folder_path, fname)
                    current_data_exists=True
                aux_files.append(path.join(folder_path, fname))

        # ---- Step 4: derive bin counts from the binary file size
        # (4 bytes per float32 value per pixel)
        add_pix = False
        num_rows = int(parm_dict['grid_num_rows'])
        num_cols = int(parm_dict['grid_num_cols'])
        if verbose:
            print('\tRows: {}, Cols: {}'.format(num_rows, num_cols))
        num_pix = num_rows * num_cols
        tot_bins = real_size / (num_pix * 4)
        # Check for case where only a single pixel is missing.
        if num_pix == 1:
            check_bins = real_size / (num_pix * 4)
        else:
            check_bins = real_size / ((num_pix - 1) * 4)

        if verbose:
            print('\tChecking bins: Total: {}, actual: {}'.format(tot_bins,
                                                                  check_bins))

        # Non-integer bin counts under BOTH assumptions means the file size is
        # inconsistent with the stated grid - cannot recover
        if tot_bins % 1 and check_bins % 1:
            raise ValueError('Aborting! Some parameter appears to have '
                             'changed in-between')
        elif not tot_bins % 1:
            # Everything's ok
            pass
        elif not check_bins % 1:
            # File size fits (num_pix - 1) pixels: one pixel is missing and the
            # dataset will be zero-padded when read
            tot_bins = check_bins
            warn('Warning:  A pixel seems to be missing from the data. '
                 'File will be padded with zeros.')
            add_pix = True

        tot_bins = int(tot_bins) * tot_bins_multiplier

        # ---- Step 5: read (or fabricate) the BE bin vectors and excitation
        # waveform
        if 'parm_mat' in path_dict.keys():
            if verbose:
                print('\treading BE arrays from parameters text file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm = self._read_parms_mat(path_dict['parm_mat'], isBEPS)
        elif 'old_mat_parms' in path_dict.keys():
            if verbose:
                print('\treading BE arrays from old mat text file')
            bin_inds, bin_freqs, bin_FFT, ex_wfm, dc_amp_vec = self._read_old_mat_be_vecs(path_dict['old_mat_parms'], verbose=verbose)
        else:
            if verbose:
                print('\tGenerating dummy BE arrays')
            band_width = parm_dict['BE_band_width_[Hz]'] * (0.5 - parm_dict['BE_band_edge_trim'])
            st_f = parm_dict['BE_center_frequency_[Hz]'] - band_width
            en_f = parm_dict['BE_center_frequency_[Hz]'] + band_width
            bin_freqs = np.linspace(st_f, en_f, tot_bins, dtype=np.float32)

            warn('No parms .mat file found.... Filling dummy values into ancillary datasets.')
            bin_inds = np.zeros(shape=tot_bins, dtype=np.int32)
            bin_FFT = np.zeros(shape=tot_bins, dtype=np.complex64)
            ex_wfm = np.zeros(shape=100, dtype=np.float32)

        # Forcing standardized datatypes:
        bin_inds = np.int32(bin_inds)
        bin_freqs = np.float32(bin_freqs)
        bin_FFT = np.complex64(bin_FFT)
        ex_wfm = np.float32(ex_wfm)

        self.FFT_BE_wave = bin_FFT

        # ---- Step 6: build the UDVS table and the legacy 2-row spectroscopic
        # index matrix (row 0 = bin index, row 1 = UDVS step index)
        if isBEPS:
            if verbose:
                print('\tBuilding UDVS table for BEPS')
            UDVS_labs, UDVS_units, UDVS_mat = self._build_udvs_table(parm_dict, verbose=verbose)

            if verbose:
                print('\tTrimming UDVS table to remove unused plot group columns')

            UDVS_mat, UDVS_labs, UDVS_units = trimUDVS(UDVS_mat, UDVS_labs, UDVS_units, ignored_plt_grps)

            old_spec_inds = np.zeros(shape=(2, tot_bins), dtype=INDICES_DTYPE)

            # Will assume that all excitation waveforms have same num of bins
            num_actual_udvs_steps = UDVS_mat.shape[0] / udvs_denom
            bins_per_step = tot_bins / num_actual_udvs_steps
            if verbose:
                print('\t# UDVS steps: {}, # bins/step: {}'
                      ''.format(num_actual_udvs_steps, bins_per_step))

            if bins_per_step % 1:
                print('UDVS mat shape: {}, total bins: {}, bins per step: {}'.format(UDVS_mat.shape, tot_bins,
                                                                                     bins_per_step))
                raise ValueError('Non integer number of bins per step!')

            bins_per_step = int(bins_per_step)
            num_actual_udvs_steps = int(num_actual_udvs_steps)

            stind = 0
            for step_index in range(UDVS_mat.shape[0]):
                if UDVS_mat[step_index, 2] < 1E-3:  # invalid AC amplitude
                    continue
                # Bin step
                old_spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=INDICES_DTYPE)
                # UDVS step
                old_spec_inds[1, stind:stind + bins_per_step] = step_index * np.ones(bins_per_step, dtype=INDICES_DTYPE)
                stind += bins_per_step
            del stind, step_index

        else:  # BE Line
            if verbose:
                print('\tPreparing supporting variables since BE-Line')
            self.signal_type = 1
            self.expt_type = 1  # Stephen has not used this index for some reason
            num_actual_udvs_steps = 1
            bins_per_step = tot_bins
            UDVS_labs = ['step_num', 'dc_offset', 'ac_amp', 'wave_type', 'wave_mod', 'be-line']
            UDVS_units = ['', 'V', 'A', '', '', '']
            UDVS_mat = np.array([1, 0, parm_dict['BE_amplitude_[V]'], 1, 1, 1],
                                dtype=np.float32).reshape(1, len(UDVS_labs))

            old_spec_inds = np.vstack((np.arange(tot_bins, dtype=INDICES_DTYPE),
                                       np.zeros(tot_bins, dtype=INDICES_DTYPE)))

        # Some very basic information that can help the processing / analysis crew
        parm_dict['num_bins'] = tot_bins
        parm_dict['num_pix'] = num_pix
        parm_dict['num_udvs_steps'] = num_actual_udvs_steps
        parm_dict['num_steps'] = num_actual_udvs_steps

        if verbose:
            print('\tPreparing UDVS slices for region references')
        udvs_slices = dict()
        for col_ind, col_name in enumerate(UDVS_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))

        # Need to add the Bin Waveform type - infer from UDVS
        exec_bin_vec = self.signal_type * np.ones(len(bin_inds), dtype=np.int32)

        if self.expt_type == 2:
            if verbose:
                print('\tExperiment type = 2. Doubling BE vectors')
            exec_bin_vec = np.hstack((exec_bin_vec, -1 * exec_bin_vec))
            bin_inds = np.hstack((bin_inds, bin_inds))
            bin_freqs = np.hstack((bin_freqs, bin_freqs))
            # This is wrong but I don't know what else to do
            bin_FFT = np.hstack((bin_FFT, bin_FFT))

        # Create Spectroscopic Values and Spectroscopic Values Labels datasets
        # This is an old and legacy way of doing things. Ideally, all we would need ot do is just get the unit values
        if verbose:
            print('\tCalculating spectroscopic values')
        ret_vals = createSpecVals(UDVS_mat, old_spec_inds, bin_freqs,
                                  exec_bin_vec, parm_dict, UDVS_labs,
                                  UDVS_units, verbose=verbose)
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = ret_vals

        if verbose:
            print('\t\tspec_vals_labs: {}'.format(spec_vals_labs))
            unit_vals = get_unit_values(spec_inds, spec_vals,
                                        all_dim_names=spec_vals_labs,
                                        is_spec=True, verbose=False)
            print('\tUnit spectroscopic values')
            for key, val in unit_vals.items():
                print('\t\t{} : length: {}, values:\n\t\t\t{}'.format(key, len(val), val))

        # Sanity check: spectroscopic axis must match the main dataset width
        if spec_inds.shape[1] != tot_bins:
            raise ValueError('Second axis of spectroscopic indices: {} not '
                             'matching with second axis of the expected main '
                             'dataset: {}'.format(spec_inds.shape, tot_bins))

        # Not sure what is happening here but this should work.
        spec_dim_dict = dict()
        for entry in spec_vals_labs_names:
            spec_dim_dict[entry[0] + '_parameters'] = entry[1]

        spec_vals_slices = dict()

        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind, row_ind + 1), slice(None))

        # ---- Step 7: create the HDF5 file and write all groups / attributes
        if path.exists(h5_path):
            if verbose:
                print('\tRemoving existing / old translated file: ' + h5_path)
            remove(h5_path)

        # First create the file
        h5_f = h5py.File(h5_path, mode='w')

        # Then write root level attributes
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        try:
            global_parms['experiment_date'] = parm_dict['File_date_and_time']
        except KeyError:
            # Placeholder date when the parameter file lacks a timestamp
            global_parms['experiment_date'] = '1:1:1'

        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']
        global_parms['translator'] = 'ODF'
        if verbose:
            print('\tWriting attributes to HDF5 file root')
        write_simple_attrs(h5_f, global_parms)
        write_book_keeping_attrs(h5_f)

        # Then create the measurement group
        h5_meas_group = create_indexed_group(h5_f, 'Measurement')

        # Write attributes at the measurement group level
        if verbose:
            print('\twriting attributes to Measurement group')
        write_simple_attrs(h5_meas_group, parm_dict)

        # Create the Channel group
        h5_chan_grp = create_indexed_group(h5_meas_group, 'Channel')

        # Write channel group attributes
        write_simple_attrs(h5_chan_grp, {'Channel_Input': 'IO_Analog_Input_1',
                                         'channel_type': 'BE'})

        # ---- Step 8: write ancillary datasets
        # Now the datasets!
        if verbose:
            print('\tCreating ancillary datasets')
        h5_chan_grp.create_dataset('Excitation_Waveform', data=ex_wfm)

        h5_udvs = h5_chan_grp.create_dataset('UDVS', data=UDVS_mat)
        write_region_references(h5_udvs, udvs_slices, add_labels_attr=True, verbose=verbose)
        write_simple_attrs(h5_udvs, {'units': UDVS_units}, verbose=verbose)

        h5_chan_grp.create_dataset('UDVS_Indices', data=old_spec_inds[1])

        h5_chan_grp.create_dataset('Bin_Step', data=np.arange(bins_per_step, dtype=INDICES_DTYPE),
                                   dtype=INDICES_DTYPE)

        h5_chan_grp.create_dataset('Bin_Indices', data=bin_inds, dtype=INDICES_DTYPE)
        h5_chan_grp.create_dataset('Bin_Frequencies', data=bin_freqs)
        h5_chan_grp.create_dataset('Bin_FFT', data=bin_FFT)
        h5_chan_grp.create_dataset('Bin_Wfm_Type', data=exec_bin_vec)

        if verbose:
            print('\tWriting Position datasets')

        pos_dims = [Dimension('X', 'm', np.arange(num_cols)),
                    Dimension('Y', 'm', np.arange(num_rows))]
        h5_pos_ind, h5_pos_val = write_ind_val_dsets(h5_chan_grp, pos_dims, is_spectral=False, verbose=verbose)
        if verbose:
            print('\tPosition datasets of shape: {}'.format(h5_pos_ind.shape))

        if verbose:
            print('\tWriting Spectroscopic datasets of shape: {}'.format(spec_inds.shape))
        h5_spec_inds = h5_chan_grp.create_dataset('Spectroscopic_Indices', data=spec_inds, dtype=INDICES_DTYPE)
        h5_spec_vals = h5_chan_grp.create_dataset('Spectroscopic_Values', data=np.array(spec_vals), dtype=VALUES_DTYPE)
        for dset in [h5_spec_inds, h5_spec_vals]:
            write_region_references(dset, spec_vals_slices, add_labels_attr=True, verbose=verbose)
            write_simple_attrs(dset, {'units': spec_vals_units}, verbose=verbose)
            write_simple_attrs(dset, spec_dim_dict)

        # Noise floor should be of shape: (udvs_steps x 3 x positions)
        if verbose:
            print('\tWriting noise floor dataset')
        h5_chan_grp.create_dataset('Noise_Floor', (num_pix, num_actual_udvs_steps), dtype=nf32,
                                   chunks=(1, num_actual_udvs_steps))

        """
        New Method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares
        of UDVS steps by pixels.  N is determined dynamically based on the dimensions of the
        dataset.  Currently it is set such that individual chunks are less than 10kB in size.

        Chris Smith -- csmith55@utk.edu
        """
        BEPS_chunks = calc_chunks([num_pix, tot_bins],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, bins_per_step))
        if verbose:
            print('\tHDF5 dataset will have chunks of size: {}'.format(BEPS_chunks))
            print('\tCreating empty main dataset of shape: ({}, {})'.format(num_pix, tot_bins))

        # ---- Step 9: create the (empty) main dataset and fill it from the
        # binary files
        self.h5_raw = write_main_dataset(h5_chan_grp, (num_pix, tot_bins), 'Raw_Data', 'Piezoresponse', 'V', None, None,
                                         dtype=np.complex64, chunks=BEPS_chunks, compression='gzip',
                                         h5_pos_inds=h5_pos_ind, h5_pos_vals=h5_pos_val, h5_spec_inds=h5_spec_inds,
                                         h5_spec_vals=h5_spec_vals, verbose=verbose)

        if verbose:
            print('\tReading data from binary data files into raw HDF5')
        self._read_data(UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                        add_pix, verbose=verbose)

        # ---- Step 10: plot groups, secondary (current) channel, cleanup.
        # NOTE(review): self.mean_resp / max_resp / min_resp are presumably
        # populated by _read_data above - confirm before refactoring.
        if verbose:
            print('\tGenerating plot groups')
        generatePlotGroups(self.h5_raw, self.mean_resp, folder_path, basename,
                           self.max_resp, self.min_resp, max_mem_mb=self.max_ram,
                           spec_label=spec_label, show_plots=show_plots, save_plots=save_plots,
                           do_histogram=do_histogram, debug=verbose)
        if verbose:
            print('\tUpgrading to USIDataset')
        self.h5_raw = USIDataset(self.h5_raw)

        # Go ahead and read the current data in the second (current) channel
        if current_data_exists:                     #If a .dat file matches
            if verbose:
                print('\tReading data in secondary channels (current)')
            self._read_secondary_channel(h5_meas_group, aux_files,
                                         verbose=verbose)

        if verbose:
            print('\tClosing HDF5 file')
        h5_f.close()

        return h5_path
Chris Smith's avatar
Chris Smith committed
532

533
534
    def _read_data(self, UDVS_mat, parm_dict, path_dict, real_size, isBEPS,
                   add_pix, verbose=False):
        """
        Dispatches to the correct raw-data reader depending on whether the
        measurement is BE-Line or BEPS, whether the files fit in memory, and
        which field mode (in / out of field) was used.

        Parameters
        ----------
        UDVS_mat : numpy.ndarray of float
            UDVS table
        parm_dict : dict
            Experimental parameters
        path_dict : dict
            Dictionary of data files to be read
        real_size : dict
            Size of each data file
        isBEPS : boolean
            Is the data BEPS
        add_pix : boolean
            Does the reader need to add extra pixels to the end of the dataset
        verbose : bool, optional. Default = False
            Whether or not to print logs

        Returns
        -------
        None
        """
        if not isBEPS:
            # BE-Line data is always small enough to be read in a single shot
            if verbose:
                print('\t\tReading all raw data for BE-Line in one shot')
            self._quick_read_data(path_dict['read_real'],
                                  path_dict['read_imag'],
                                  parm_dict['num_udvs_steps'],
                                  verbose=verbose)
        else:
            # BEPS: choose the reader based on file size and field mode.
            # NOTE: the field-mode key is only ever consulted on BEPS paths.
            field_mode = parm_dict['VS_measure_in_field_loops']
            fits_in_memory = real_size < self.max_ram
            if fits_in_memory and field_mode == 'out-of-field':
                # Small (fits in RAM) out-of-field-only BEPS
                if verbose:
                    print('\t\tReading all raw BEPS (out-of-field) data at once')
                self._quick_read_data(path_dict['read_real'],
                                      path_dict['read_imag'],
                                      parm_dict['num_udvs_steps'],
                                      verbose=verbose)
            elif fits_in_memory and field_mode == 'in-field':
                # Small in-field-only BEPS
                if verbose:
                    print('\t\tReading all raw BEPS (in-field only) data at once')
                self._quick_read_data(path_dict['write_real'],
                                      path_dict['write_imag'],
                                      parm_dict['num_udvs_steps'],
                                      verbose=verbose)
            else:
                # Large BEPS datasets OR measurements with both in and out of field
                if verbose:
                    print('\t\tReading all raw data for in-and-out-of-field OR '
                          'very large file one pixel at a time')
                self._read_beps_data(path_dict, UDVS_mat.shape[0],
                                     field_mode, add_pix)
        self.h5_raw.file.flush()
Chris Smith's avatar
Chris Smith committed
596

597
    def _read_beps_data(self, path_dict, udvs_steps, mode, add_pixel=False):
        """
        Reads the imaginary and real data files pixelwise and writes to the H5 file 
        
        Parameters 
        --------------------
        path_dict : dictionary
            Dictionary containing the absolute paths of the real and imaginary data files
        udvs_steps : unsigned int
            Number of UDVS steps
        mode : String / Unicode
            'in-field', 'out-of-field', or 'in and out-of-field'
        add_pixel : boolean. (Optional; default is False)
            If an empty pixel worth of data should be written to the end             
        
        Returns 
        -------------------- 
        None
        """

        print('---- reading pixel-by-pixel ----------')

        # 4 bytes per float32 component; real and imaginary live in separate files
        bytes_per_pix = self.h5_raw.shape[1] * 4
        step_size = self.h5_raw.shape[1] / udvs_steps

        # One parser per (real, imag) file pair. 'read' files hold out-of-field
        # data, 'write' files hold in-field data.
        if mode == 'out-of-field':
            parsers = [BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in-field':
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], bytes_per_pix)]
        elif mode == 'in and out-of-field':
            # each file will only have half the udvs steps:
            if 0.5 * udvs_steps % 1:
                raise ValueError('Odd number of UDVS')

            udvs_steps = int(0.5 * udvs_steps)
            # be careful - each pair contains only half the necessary bins - so read half
            parsers = [BEodfParser(path_dict['write_real'], path_dict['write_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2)),
                       BEodfParser(path_dict['read_real'], path_dict['read_imag'],
                                   self.h5_raw.shape[0], int(bytes_per_pix / 2))]

            if step_size % 1:
                raise ValueError('strange number of bins per UDVS step. Exiting')

            step_size = int(step_size)

        # Sample a few random spectra to decide whether the whole dataset needs
        # to be conjugated (so quality factors come out positive)
        rand_spectra = self._get_random_spectra(parsers, self.h5_raw.shape[0], udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra)
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)

        # Accumulators for the noise-floor / plot-group statistics
        self.mean_resp = np.zeros(shape=(self.h5_raw.shape[1]), dtype=np.complex64)
        self.max_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)
        self.min_resp = np.zeros(shape=(self.h5_raw.shape[0]), dtype=np.float32)

        numpix = self.h5_raw.shape[0]
        """ 
        Don't try to do the last step if a pixel is missing.   
        This will be handled after the loop. 
        """
        if add_pixel:
            numpix -= 1

        for pix_indx in range(numpix):
            # Progress печатается only for non-trivial datasets: every ~10 %
            if self.h5_raw.shape[0] > 5:
                if pix_indx % int(round(self.h5_raw.shape[0] / 10)) == 0:
                    print('Reading... {} complete'.format(round(100 * pix_indx / self.h5_raw.shape[0])))

            # get the raw stream from each parser
            pxl_data = list()
            for prsr in parsers:
                pxl_data.append(prsr.read_pixel())

            # interleave if both in and out of field
            # we are ignoring user defined possibilities...
            if mode == 'in and out-of-field':
                in_fld = pxl_data[0]
                out_fld = pxl_data[1]

                # Alternate UDVS steps: even rows in-field, odd rows out-of-field
                in_fld_2 = in_fld.reshape(udvs_steps, step_size)
                out_fld_2 = out_fld.reshape(udvs_steps, step_size)
                raw_mat = np.empty((udvs_steps * 2, step_size), dtype=out_fld.dtype)
                raw_mat[0::2, :] = in_fld_2
                raw_mat[1::2, :] = out_fld_2
                raw_vec = raw_mat.reshape(in_fld.size + out_fld.size).transpose()
            else:
                raw_vec = pxl_data[0]  # only one parser
            self.max_resp[pix_indx] = np.max(np.abs(raw_vec))
            self.min_resp[pix_indx] = np.min(np.abs(raw_vec))
            # Incremental (running) mean over pixels: avoids holding all data
            self.mean_resp = (1 / (pix_indx + 1)) * (raw_vec + pix_indx * self.mean_resp)

            if take_conjugate:
                raw_vec = np.conjugate(raw_vec)
            self.h5_raw[pix_indx, :] = np.complex64(raw_vec[:])
            self.h5_raw.file.flush()

        # Add zeros to main_data for the missing pixel. 
        if add_pixel:
            self.h5_raw[-1, :] = 0 + 0j

        print('---- Finished reading files -----')
699

700
701
    def _quick_read_data(self, real_path, imag_path, udvs_steps,
                         verbose=False):
        """
        Reads the full raw dataset from a single pair of real / imaginary
        files in one shot and writes it to the HDF5 main dataset, padding
        with zeros if the files are shorter than expected.

        Parameters
        -----------
        real_path : String / Unicode
            Absolute file path of the real data file
        imag_path : String / Unicode
            Absolute file path of the imaginary data file
        udvs_steps : unsigned int
            Number of UDVS steps
        verbose : bool, optional. Default = False
            Whether or not to print debugging logs

        Returns
        -------
        None
        """
        parser = BEodfParser(real_path, imag_path, self.h5_raw.shape[0],
                             self.h5_raw.shape[1] * 4)

        step_size = self.h5_raw.shape[1] / udvs_steps
        # Sample random spectra to decide whether a global conjugate is needed
        # (so that quality factors come out positive)
        rand_spectra = self._get_random_spectra([parser],
                                                self.h5_raw.shape[0],
                                                udvs_steps, step_size,
                                                num_spectra=self.num_rand_spectra,
                                                verbose=verbose)
        if verbose:
            print('\t\t\tChecking if conjugate is required')
        take_conjugate = requires_conjugate(rand_spectra, cores=self._cores)
        raw_vec = parser.read_all_data()
        if take_conjugate:
            if verbose:
                print('\t'*4 + 'Taking conjugate for positive quality factors')
            raw_vec = np.conjugate(raw_vec)

        expected_n_elem = np.prod(self.h5_raw.shape)
        # FIX: the original compared raw_vec.shape (a tuple) against a numpy
        # scalar, which only worked via accidental numpy broadcasting over a
        # 1-tuple and produced garbled warning text. Compare element counts.
        if raw_vec.size != expected_n_elem:
            percentage_padded = 100 * (expected_n_elem - raw_vec.size) / expected_n_elem
            warn('Warning! Raw data length {} is not matching placeholder length {}. '
                 'Padding zeros for {}% of the data!'.format(raw_vec.size, expected_n_elem, percentage_padded))

            padded_raw_vec = np.zeros(expected_n_elem, dtype=np.complex64)

            padded_raw_vec[:raw_vec.size] = raw_vec
            raw_mat = padded_raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])
        else:
            raw_mat = raw_vec.reshape(self.h5_raw.shape[0], self.h5_raw.shape[1])

        # Write to the h5 dataset:
        self.mean_resp = np.mean(raw_mat, axis=0)
        self.max_resp = np.amax(np.abs(raw_mat), axis=0)
        self.min_resp = np.amin(np.abs(raw_mat), axis=0)
        self.h5_raw[:, :] = np.complex64(raw_mat)
        self.h5_raw.file.flush()

        print('---- Finished reading files -----')

755
756
    @staticmethod
    def _parse_file_path(data_filepath):
Somnath, Suhas's avatar
Somnath, Suhas committed
757
758
759
760
761
762
763
        """
        Returns the basename and a dictionary containing the absolute file paths for the
        real and imaginary data files, text and mat parameter files in a dictionary
        
        Parameters 
        --------------------
        data_filepath: String / Unicode
Somnath, Suhas's avatar
Somnath, Suhas committed
764
            Absolute path of any file in the same directory as the .dat files
Somnath, Suhas's avatar
Somnath, Suhas committed
765
766
767
768
769
770
771
772
773
        
        Returns 
        --------------------
        basename : String / Unicode
            Basename of the dataset      
        path_dict : Dictionary
            Dictionary containing absolute paths of all necessary data and parameter files
        """
        (folder_path, basename) = path.split(data_filepath)
Unknown's avatar
Unknown committed
774
        (super_folder, basename) = path.split(folder_path)
Somnath, Suhas's avatar
Somnath, Suhas committed
775

776
777
        if basename.endswith('_d') or basename.endswith('_c'):
            # Old old data format where the folder ended with a _d or _c to denote a completed spectroscopic run
Somnath, Suhas's avatar
Somnath, Suhas committed
778
779
780
781
782
783
784
785
            basename = basename[:-2]
        """
        A single pair of real and imaginary files are / were generated for:
            BE-Line and BEPS (compiled version only generated out-of-field or 'read')
        Two pairs of real and imaginary files were generated for later BEPS datasets
            These have 'read' and 'write' prefixes to denote out or in field respectively
        """
        path_dict = dict()
Unknown's avatar
Unknown committed
786

Somnath, Suhas's avatar
Somnath, Suhas committed
787
        for file_name in listdir(folder_path):
Chris Smith's avatar
Chris Smith committed
788
            abs_path = path.join(folder_path, file_name)
Somnath, Suhas's avatar
Somnath, Suhas committed
789
790
791
792
793
            if file_name.endswith('.txt') and file_name.find('parm') > 0:
                path_dict['parm_txt'] = abs_path
            elif file_name.find('.mat') > 0:
                if file_name.find('more_parms') > 0:
                    path_dict['parm_mat'] = abs_path
Unknown's avatar
Unknown committed
794
                elif file_name == (basename + '.mat'):
Somnath, Suhas's avatar
Somnath, Suhas committed
795
796
797
798
799
800
801
802
803
804
805
806
                    path_dict['old_mat_parms'] = abs_path
            elif file_name.endswith('.dat'):
                # Need to account for the second AI channel here
                file_tag = 'read'
                if file_name.find('write') > 0:
                    file_tag = 'write'
                if file_name.find('real') > 0:
                    file_tag += '_real'
                elif file_name.find('imag') > 0:
                    file_tag += '_imag'
                path_dict[file_tag] = abs_path

Chris Smith's avatar
Chris Smith committed
807
        return basename, path_dict
Somnath, Suhas's avatar
Somnath, Suhas committed
808

809
810
    def _read_secondary_channel(self, h5_meas_group, aux_file_path,
                                verbose=False):
811
812
813
814
815
816
817
818
819
820
821
822
        """
        Reads secondary channel stored in AI .mat file
        Currently works for in-field measurements only, but should be updated to
        include both in and out of field measurements

        Parameters
        -----------
        h5_meas_group : h5 group
            Reference to the Measurement group
        aux_file_path : String / Unicode
            Absolute file path of the secondary channel file.
        """
823
824
        if verbose:
            print('\t---------- Reading Secondary Channel  ----------')
825
        if isinstance(aux_file_path, (list, tuple)):
826
827
828
829
            aux_file_paths = aux_file_path
        else:
            aux_file_paths = list(aux_file_path)

830
        is_in_out_field = 'Field' in self.h5_raw.spec_dim_labels
831

832
833
834
835
836
837
838
839
840
        if not is_in_out_field and len(aux_file_paths) > 1:
            # TODO: Find a better way to handle this
            warn('\t\tField was not varied but found more than one file for '
                 'secondary channel: {}.\n\t\tResults will be overwritten'
                 ''.format([path.split(item)[-1] for item in aux_file_paths]))
        elif is_in_out_field and len(aux_file_paths) == 1:
            warn('\t\tField was varied but only one data file for secondary'
                 'channel was found. Half the data will be zeros')

841
        spectral_len = 1
842
843
844
        for dim_name, dim_size in zip(self.h5_raw.spec_dim_labels,
                                      self.h5_raw.spec_dim_sizes):
            if dim_name == 'Frequency':
845
                continue
846
            spectral_len = spectral_len * dim_size
847

848
        num_pix = self.h5_raw.shape[0]
849
        if verbose:
850
851
852
            print('\t\tExpecting this channel to be of shape: ({}, {})'
                  ''.format(num_pix, spectral_len))
            print('\t\tis_in_out_field: {}'.format(is_in_out_field))
853
854

        # create a new channel
855
856
        h5_current_channel_group = create_indexed_group(h5_meas_group,
                                                        'Channel')
857
858
859
860
861

        # Copy attributes from the main channel
        copy_attributes(self.h5_raw.parent, h5_current_channel_group)

        # Modify attributes that are different
862
863
864
        write_simple_attrs(h5_current_channel_group,
                           {'Channel_Input': 'IO_Analog_Input_2',
                            'channel_type': 'Current'},
ssomnath's avatar
ssomnath committed
865
                           verbose=False)
866
867
868
869
870
871
872

        # Get the reduced dimensions
        ret_vals = write_reduced_anc_dsets(h5_current_channel_group,
                                           self.h5_raw.h5_spec_inds,
                                           self.h5_raw.h5_spec_vals,
                                           'Frequency', is_spec=True)
        h5_current_spec_inds, h5_current_spec_values = ret_vals
873

874
875
        if verbose:
            print('\t\tCreated groups, wrote attributes and spec datasets')
876
877
878
879
880
881
882
883
884
885
886
887
888

        h5_current_main = write_main_dataset(h5_current_channel_group,  # parent HDF5 group
                                             (num_pix, spectral_len),  # shape of Main dataset
                                             'Raw_Data',  # Name of main dataset
                                             'Current',  # Physical quantity contained in Main dataset
                                             'nA',  # Units for the physical quantity
                                             None,  # Position dimensions
                                             None,  # Spectroscopic dimensions
                                             h5_pos_inds=self.h5_raw.h5_pos_inds,
                                             h5_pos_vals=self.h5_raw.h5_pos_vals,
                                             h5_spec_inds=h5_current_spec_inds,
                                             h5_spec_vals=h5_current_spec_values,
                                             dtype=np.float32,  # data type / precision
ssomnath's avatar
ssomnath committed
889
890
                                             main_dset_attrs={'IO_rate': 4E+6, 'Amplifier_Gain': 9},
                                             verbose=verbose)
891

892
893
894
895
        if verbose:
            print('\t\tCreated empty main dataset:\n{}'
                  ''.format(h5_current_main))

896
897
898
899
900
901
902
        if is_in_out_field:
            if verbose:
                print('\t\tHalving the spectral length per binary file to: {} '
                      'since this measurement has in and out of field'
                      ''.format(spectral_len // 2))
            spectral_len = spectral_len // 2

903
        # calculate the # positions that can be stored in memory in one go.
904
905
906
907
        b_per_position = np.float32(0).itemsize * spectral_len

        max_pos_per_read = int(np.floor((get_available_memory()) / b_per_position))

908
909
910
911
        if verbose:
            print('\t\tAllowed to read {} pixels per chunk'
                  ''.format(max_pos_per_read))
            print('\t\tStarting to read raw binary data')
912

913
        # Open the read and write files and write them to the hdf5 file
914
915
916
917
        for aux_file in aux_file_paths:
            if 'write' in aux_file:
                infield = True
            else:
918
919
920
921
                infield = False

            if verbose:
                print('\t' * 3 + 'Reading file: {}'.format(aux_file))
922
923
924
925
926
927

            cur_file = open(aux_file, "rb")

            start_pix = 0

            while start_pix < num_pix:
928
929
                cur_file.seek(start_pix * b_per_position, 0)

930
931
                end_pix = min(num_pix, start_pix + max_pos_per_read)

932
933
934
935
936
937
938
939
940
                pos_to_read = end_pix - start_pix
                bytes_to_read = pos_to_read * b_per_position

                if verbose:
                    print('\t' * 4 + 'Reading pixels {} to {} - {} bytes'
                          ''.format(start_pix, end_pix, bytes_to_read))

                cur_data = np.frombuffer(cur_file.read(bytes_to_read),
                                         dtype='f')