Commit c6ff9d8f authored by Somnath, Suhas's avatar Somnath, Suhas
Browse files

PEP8 cleanup. build_ind_val_dsets moved to translators.utils from translators.

parent 22e7cc56
......@@ -15,7 +15,7 @@ from scipy.io.matlab import loadmat # To load parameters stored in Matlab .mat
from .be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, createSpecVals, \
requires_conjugate
from .translator import Translator
from .utils import generateDummyMainParms
from .utils import generate_dummy_main_parms, build_ind_val_dsets
from ..hdf_utils import getH5DsetRefs, linkRefs, calc_chunks
from ..io_hdf5 import ioHDF5
from ..microdata import MicroDataGroup, MicroDataset
......@@ -58,7 +58,7 @@ class BEodfTranslator(Translator):
Absolute path of the resultant .h5 file
"""
(folder_path, basename) = path.split(file_path)
(basename, path_dict) = self._parsefilepath(file_path)
(basename, path_dict) = self._parse_file_path(file_path)
h5_path = path.join(folder_path, basename + '.h5')
tot_bins_multiplier = 1
......@@ -160,8 +160,8 @@ class BEodfTranslator(Translator):
self.FFT_BE_wave = bin_FFT
ds_pos_ind, ds_pos_val = self._build_ind_val_dsets([num_cols, num_rows], is_spectral=False,
labels=['X', 'Y'], units=['m', 'm'], verbose=False)
ds_pos_ind, ds_pos_val = build_ind_val_dsets([num_cols, num_rows], is_spectral=False,
labels=['X', 'Y'], units=['m', 'm'], verbose=False)
if isBEPS:
(UDVS_labs, UDVS_units, UDVS_mat) = self.__build_udvs_table(parm_dict)
......@@ -300,7 +300,7 @@ class BEodfTranslator(Translator):
meas_grp.addChildren([chan_grp])
spm_data = MicroDataGroup('')
global_parms = generateDummyMainParms()
global_parms = generate_dummy_main_parms()
global_parms['grid_size_x'] = parm_dict['grid_num_cols']
global_parms['grid_size_y'] = parm_dict['grid_num_rows']
try:
......@@ -523,7 +523,7 @@ class BEodfTranslator(Translator):
print('---- Finished reading files -----')
def _parsefilepath(self, data_filepath):
def _parse_file_path(self, data_filepath):
"""
Returns the basename and a dictionary containing the absolute file paths for the
real and imaginary data files, text and mat parameter files in a dictionary
......
......@@ -11,7 +11,7 @@ from warnings import warn
from os import path, remove # File Path formatting
from scipy.io.matlab import loadmat; # To load parameters stored in Matlab .mat file
from .translator import Translator # Because this class extends the abstract Translator class
from .utils import makePositionMat, getPositionSlicing, generateDummyMainParms
from .utils import make_position_mat, get_position_slicing, generate_dummy_main_parms
from .be_utils import trimUDVS, getSpectroscopicParmLabel, generatePlotGroups, createSpecVals
from ..microdata import MicroDataGroup, MicroDataset # The building blocks for defining hierarchical storage in the H5 file
from ..io_hdf5 import ioHDF5 # Now the translator is responsible for writing the data.
......@@ -114,8 +114,8 @@ class BEodfRelaxationTranslator(Translator):
ex_wfm = np.float32(ex_wfm)
self.FFT_BE_wave = bin_FFT
pos_mat = makePositionMat([num_cols, num_rows])
pos_slices = getPositionSlicing(['X','Y'], num_pix)
pos_mat = make_position_mat([num_cols, num_rows])
pos_slices = get_position_slicing(['X', 'Y'], num_pix)
ds_ex_wfm = MicroDataset('Excitation_Waveform', ex_wfm)
ds_pos_ind = MicroDataset('Position_Indices', pos_mat, dtype=np.uint32)
......@@ -231,7 +231,7 @@ class BEodfRelaxationTranslator(Translator):
meas_grp.addChildren([chan_grp])
spm_data = MicroDataGroup('')
global_parms = generateDummyMainParms()
global_parms = generate_dummy_main_parms()
global_parms['grid_size_x'] = parm_dict['grid_num_cols'];
global_parms['grid_size_y'] = parm_dict['grid_num_rows'];
global_parms['experiment_date'] = parm_dict['File_date_and_time']
......
......@@ -12,7 +12,7 @@ from os import path, listdir, remove
from warnings import warn
import xlrd as xlreader # To read the UDVS spreadsheet
from scipy.io.matlab import loadmat # To load parameters stored in Matlab .mat file
from .utils import makePositionMat, generateDummyMainParms
from .utils import make_position_mat, generate_dummy_main_parms
from .be_utils import trimUDVS, getSpectroscopicParmLabel, parmsToDict, generatePlotGroups, normalizeBEresponse, \
createSpecVals
from ..microdata import MicroDataGroup, MicroDataset
......@@ -57,7 +57,7 @@ class BEPSndfTranslator(Translator):
if debug:
print('BEndfTranslator: Getting file paths')
parm_filepath, udvs_filepath, parms_mat_path = self._parsefilepath(data_filepath)
parm_filepath, udvs_filepath, parms_mat_path = self._parse_file_path(data_filepath)
if debug:
print('BEndfTranslator: Reading Parms text file')
......@@ -120,12 +120,12 @@ class BEPSndfTranslator(Translator):
s_pixels = np.array(parsers[0].getSpatialPixels())
self.pos_labels = ['Laser Spot', 'Z', 'X', 'Y']
self.pos_labels = [self.pos_labels[i] for i in np.where(s_pixels > 1)[0]]
self.pos_mat = makePositionMat(s_pixels)
self.pos_mat = make_position_mat(s_pixels)
self.pos_units = ['' for _ in range(len(self.pos_labels))]
# self.pos_mat = np.int32(self.pos_mat)
# Helping Eric out a bit. Remove this section at a later time:
main_parms = generateDummyMainParms()
main_parms = generate_dummy_main_parms()
main_parms['grid_size_x'] = self.parm_dict['grid_num_cols']
main_parms['grid_size_y'] = self.parm_dict['grid_num_rows']
main_parms['experiment_date'] = self.parm_dict['File_date_and_time']
......@@ -530,7 +530,7 @@ class BEPSndfTranslator(Translator):
###################################################################################################
def _parsefilepath(self, file_path):
def _parse_file_path(self, file_path):
"""
Returns the file paths to the parms text file and UDVS spreadsheet.\n
Note: This function also initializes the basename and the folder_path for this instance
......
......@@ -14,7 +14,7 @@ from scipy.io.matlab import loadmat # To load parameters stored in Matlab .mat f
from .gmode_utils import readGmodeParms
from .translator import Translator # Because this class extends the abstract Translator class
from .utils import makePositionMat, getPositionSlicing, generateDummyMainParms
from .utils import make_position_mat, get_position_slicing, generate_dummy_main_parms
from ..hdf_utils import getH5DsetRefs, linkRefs
from ..io_hdf5 import ioHDF5 # Now the translator is responsible for writing the data.
from ..microdata import MicroDataGroup, MicroDataset # The building blocks for defining hierarchical storage in the H5 file
......@@ -75,8 +75,8 @@ class GDMTranslator(Translator):
num_pix = num_rows*num_cols
pos_mat = makePositionMat([num_cols, num_rows])
pos_slices = getPositionSlicing(['X','Y'], num_pix)
pos_mat = make_position_mat([num_cols, num_rows])
pos_slices = get_position_slicing(['X', 'Y'], num_pix)
# Now start creating datasets and populating:
ds_pos_ind = MicroDataset('Position_Indices', np.uint32(pos_mat))
......@@ -106,7 +106,7 @@ class GDMTranslator(Translator):
meas_grp.addChildren([chan_grp])
spm_data = MicroDataGroup('')
global_parms = generateDummyMainParms()
global_parms = generate_dummy_main_parms()
global_parms['grid_size_x'] = parm_dict['grid_num_cols'];
global_parms['grid_size_y'] = parm_dict['grid_num_rows'];
# assuming that the experiment was completed:
......
......@@ -14,7 +14,7 @@ import h5py
import numpy as np # For array operations
from .translator import Translator
from .utils import generateDummyMainParms
from .utils import generate_dummy_main_parms, build_ind_val_dsets
from ..hdf_utils import getH5DsetRefs, linkRefs
from ..io_hdf5 import ioHDF5 # Now the translator is responsible for writing the data.
from ..microdata import MicroDataGroup, MicroDataset # building blocks for defining hierarchical storage in the H5 file
......@@ -25,7 +25,7 @@ class GIVTranslator(Translator):
Translates G-mode Fast IV datasets from .mat files to .h5
"""
def _parsefilepath(self, input_path):
def _parse_file_path(self, input_path):
pass
def translate(self, parm_path):
......@@ -51,16 +51,14 @@ class GIVTranslator(Translator):
remove(h5_path)
# Now start creating datasets and populating:
ds_spec_inds, ds_spec_vals = self._build_ind_val_dsets([excit_wfm.size], is_spectral=True,
labels=['Bias'], units=['V'], verbose=False)
ds_spec_inds, ds_spec_vals = build_ind_val_dsets([excit_wfm.size], is_spectral=True,
labels=['Bias'], units=['V'], verbose=False)
ds_spec_vals.data = np.atleast_2d(excit_wfm) # The data generated above varies linearly. Override.
ds_pos_ind, ds_pos_val = self._build_ind_val_dsets([parm_dict['grid_num_rows']], is_spectral=False,
steps=[1.0 * parm_dict['grid_scan_height_[m]'] /
parm_dict['grid_num_rows']],
labels=['Y'], units=['m'])
ds_ex_efm = MicroDataset('Excitation_Waveform', excit_wfm)
ds_pos_ind, ds_pos_val = build_ind_val_dsets([parm_dict['grid_num_rows']], is_spectral=False,
steps=[1.0 * parm_dict['grid_scan_height_[m]'] /
parm_dict['grid_num_rows']],
labels=['Y'], units=['m'])
# Minimize file size to the extent possible.
# DAQs are rated at 16 bit so float16 should be most appropriate.
......@@ -71,7 +69,7 @@ class GIVTranslator(Translator):
ds_raw_data.attrs['quantity'] = ['Current']
ds_raw_data.attrs['units'] = ['1E-{} A'.format(parm_dict['IO_amplifier_gain'])]
aux_ds_names = ['Excitation_Waveform', 'Position_Indices', 'Position_Values',
aux_ds_names = ['Position_Indices', 'Position_Values',
'Spectroscopic_Indices', 'Spectroscopic_Values']
# Until a better method is provided....
......@@ -81,7 +79,7 @@ class GIVTranslator(Translator):
# technically should change the date, etc.
spm_data = MicroDataGroup('')
global_parms = generateDummyMainParms()
global_parms = generate_dummy_main_parms()
global_parms['data_type'] = 'fastIV'
global_parms['translator'] = 'fastIV'
spm_data.attrs = global_parms
......@@ -99,7 +97,7 @@ class GIVTranslator(Translator):
chan_grp = MicroDataGroup('{:s}{:03d}'.format('Channel_', chan_index), '/Measurement_000/')
chan_grp.attrs = parm_dict
chan_grp.addChildren([ds_pos_ind, ds_pos_val, ds_spec_inds, ds_spec_vals,
ds_ex_efm, ds_raw_data])
ds_raw_data])
h5_refs = hdf.writeData(chan_grp, print_log=False)
h5_raw = getH5DsetRefs(['Raw_Data'], h5_refs)[0]
linkRefs(h5_raw, getH5DsetRefs(aux_ds_names, h5_refs))
......
......@@ -10,10 +10,9 @@ from os import path, listdir, remove
from warnings import warn
import numpy as np
from scipy.io.matlab import loadmat # To load parameters stored in Matlab .mat file
from .be_utils import parmsToDict
from .translator import Translator
from .utils import interpretFreq, generateDummyMainParms
from .utils import interpret_frequency, generate_dummy_main_parms, build_ind_val_dsets
from ..hdf_utils import getH5DsetRefs, linkRefs
from ..io_hdf5 import ioHDF5
from ..microdata import MicroDataGroup, MicroDataset
......@@ -32,7 +31,7 @@ class GLineTranslator(Translator):
data_filepath: Absolute path of the data file (.dat) in
"""
# Figure out the basename of the data:
(basename, parm_paths, data_paths) = self._parsefilepath(file_path)
(basename, parm_paths, data_paths) = self._parse_file_path(file_path)
(folder_path, unused) = path.split(file_path)
h5_path = path.join(folder_path, basename+'.h5')
......@@ -54,7 +53,7 @@ class GLineTranslator(Translator):
isBEPS, parm_dict = parmsToDict(parm_paths['parm_txt'])
# IO rate is the same for the entire board / any channel
IO_rate = interpretFreq(parm_dict['IO rate'])
IO_rate = interpret_frequency(parm_dict['IO rate'])
# Get file byte size:
# For now, assume that bigtime_00 always exists and is the main file
......@@ -84,7 +83,7 @@ class GLineTranslator(Translator):
# First finish writing all global parameters, create the file too:
spm_data = MicroDataGroup('')
global_parms = generateDummyMainParms()
global_parms = generate_dummy_main_parms()
global_parms['data_type'] = 'GLine'
global_parms['translator'] = 'GLine'
spm_data.attrs = global_parms
......@@ -105,10 +104,10 @@ class GLineTranslator(Translator):
maxshape=(num_pix, self.num_points),
chunking=(1, self.num_points), dtype=np.float16)
ds_pos_ind, ds_pos_val = self._build_ind_val_dsets([self.num_cols, self.num_rows], is_spectral=False,
labels=['X', 'Y'], units=['m', 'm'])
ds_spec_inds, ds_spec_vals = self._build_ind_val_dsets([self.num_points], is_spectral=True,
labels=['Excitation'], units=['V'])
ds_pos_ind, ds_pos_val = build_ind_val_dsets([self.num_cols, self.num_rows], is_spectral=False,
labels=['X', 'Y'], units=['m', 'm'])
ds_spec_inds, ds_spec_vals = build_ind_val_dsets([self.num_points], is_spectral=True,
labels=['Excitation'], units=['V'])
ds_spec_vals.data = np.atleast_2d(np.float32(BE_wave)) # Override the default waveform
aux_ds_names = ['Position_Indices', 'Position_Values',
......@@ -135,7 +134,7 @@ class GLineTranslator(Translator):
hdf.close()
@staticmethod
def _parsefilepath(data_filepath):
def _parse_file_path(data_filepath):
"""
Goes through the file directory and figures out the basename and the
parameter (text and .mat), data file paths (for each analog input channel)
......
......@@ -12,7 +12,7 @@ import numpy as np # For array operations
from igor import binarywave as bw
from .translator import Translator # Because this class extends the abstract Translator class
from .utils import generateDummyMainParms
from .utils import generate_dummy_main_parms, build_ind_val_dsets
from ..hdf_utils import getH5DsetRefs, linkRefs
from ..io_hdf5 import ioHDF5 # Now the translator is responsible for writing the data.
from ..microdata import MicroDataGroup, \
......@@ -64,13 +64,13 @@ class IgorIBWTranslator(Translator):
images = images.transpose(2, 0, 1) # now ordered as [chan, Y, X] image
images = np.reshape(images, (images.shape[0], -1, 1)) # 3D [chan, Y*X points,1]
ds_pos_ind, ds_pos_val = self._build_ind_val_dsets([num_rows, num_cols], is_spectral=False,
steps=[1.0 * parm_dict['SlowScanSize'] / num_rows,
1.0 * parm_dict['FastScanSize'] / num_cols],
labels=['Y', 'X'], units=['m', 'm'], verbose=verbose)
ds_pos_ind, ds_pos_val = build_ind_val_dsets([num_rows, num_cols], is_spectral=False,
steps=[1.0 * parm_dict['SlowScanSize'] / num_rows,
1.0 * parm_dict['FastScanSize'] / num_cols],
labels=['Y', 'X'], units=['m', 'm'], verbose=verbose)
ds_spec_inds, ds_spec_vals = self._build_ind_val_dsets([1], is_spectral=True, steps=[1],
labels=['arb'], units=['a.u.'], verbose=verbose)
ds_spec_inds, ds_spec_vals = build_ind_val_dsets([1], is_spectral=True, steps=[1],
labels=['arb'], units=['a.u.'], verbose=verbose)
else: # single force curve
if verbose:
......@@ -80,11 +80,11 @@ class IgorIBWTranslator(Translator):
images = np.atleast_3d(images) # now [Z, chan, 1]
images = images.transpose((1, 2, 0)) # [chan ,1, Z] force curve
ds_pos_ind, ds_pos_val = self._build_ind_val_dsets([1], is_spectral=False, steps=[25E-9],
labels=['X'], units=['m'], verbose=verbose)
ds_pos_ind, ds_pos_val = build_ind_val_dsets([1], is_spectral=False, steps=[25E-9],
labels=['X'], units=['m'], verbose=verbose)
ds_spec_inds, ds_spec_vals = self._build_ind_val_dsets([images.shape[2]], is_spectral=True, labels=['Z'],
units=['m'], verbose=verbose)
ds_spec_inds, ds_spec_vals = build_ind_val_dsets([images.shape[2]], is_spectral=True, labels=['Z'],
units=['m'], verbose=verbose)
# The data generated above varies linearly. Override.
# For now, we'll shove the Z sensor data into the spectroscopic values.
......@@ -113,7 +113,7 @@ class IgorIBWTranslator(Translator):
# Prepare the tree structure
# technically should change the date, etc.
spm_data = MicroDataGroup('')
global_parms = generateDummyMainParms()
global_parms = generate_dummy_main_parms()
global_parms['data_type'] = 'IgorIBW_' + type_suffix
global_parms['translator'] = 'IgorIBW'
spm_data.attrs = global_parms
......@@ -241,7 +241,7 @@ class IgorIBWTranslator(Translator):
return labels, default_units
def _parsefilepath(self, input_path):
def _parse_file_path(self, input_path):
pass
def _read_data(self):
......
......@@ -6,12 +6,11 @@ Created on Feb 9, 2016
import os
import numpy as np
from skimage.data import imread
from skimage.measure import block_reduce
from ..io_image import read_image, read_dm3
from ..io_image import read_image
from .translator import Translator
from .utils import generateDummyMainParms
from ..hdf_utils import getH5DsetRefs, calc_chunks, linkformain
from .utils import generate_dummy_main_parms, build_ind_val_dsets
from ..hdf_utils import getH5DsetRefs, calc_chunks, link_as_main
from ..io_hdf5 import ioHDF5
from ..microdata import MicroDataGroup, MicroDataset
......@@ -60,7 +59,7 @@ class ImageTranslator(Translator):
HDF5 Dataset object that contains the flattened images
"""
image_path, h5_path = self._parsefilepath(image_path)
image_path, h5_path = self._parse_file_path(image_path)
image, image_parms = read_image(image_path, **image_args)
usize, vsize = image.shape[:2]
......@@ -87,13 +86,13 @@ class ImageTranslator(Translator):
image = self.binning_func(image, self.bin_factor, self.bin_func)
h5_main = self._setupH5(usize, vsize, image.dtype.type, image_parms)
h5_main = self._setup_h5(usize, vsize, image.dtype.type, image_parms)
h5_main = self._read_data(image, h5_main)
return h5_main
def _setupH5(self, usize, vsize, data_type, image_parms):
def _setup_h5(self, usize, vsize, data_type, image_parms):
"""
Setup the HDF5 file in which to store the data including creating
the Position and Spectroscopic datasets
......@@ -117,7 +116,7 @@ class ImageTranslator(Translator):
"""
num_pixels = usize * vsize
root_parms = generateDummyMainParms()
root_parms = generate_dummy_main_parms()
root_parms['data_type'] = 'ImageData'
root_parms.update(image_parms)
......@@ -135,13 +134,13 @@ class ImageTranslator(Translator):
chan_grp = MicroDataGroup('Channel_000')
# Get the Position and Spectroscopic Datasets
# ds_spec_ind, ds_spec_vals = self._buildspectroscopicdatasets(usize, vsize, num_pixels)
ds_spec_ind, ds_spec_vals = self._build_ind_val_dsets([1],
is_spectral=True,
labels=['Image'])
ds_pos_ind, ds_pos_val = self._build_ind_val_dsets((usize, vsize),
is_spectral=False,
labels=['X', 'Y'],
units=['pixel', 'pixel'])
ds_spec_ind, ds_spec_vals = build_ind_val_dsets([1],
is_spectral=True,
labels=['Image'])
ds_pos_ind, ds_pos_val = build_ind_val_dsets((usize, vsize),
is_spectral=False,
labels=['X', 'Y'],
units=['pixel', 'pixel'])
ds_chunking = calc_chunks([num_pixels, 1],
data_type(0).itemsize,
......@@ -175,14 +174,14 @@ class ImageTranslator(Translator):
'Spectroscopic_Indices',
'Spectroscopic_Values']
linkformain(h5_main, *getH5DsetRefs(aux_ds_names, h5_refs))
link_as_main(h5_main, *getH5DsetRefs(aux_ds_names, h5_refs))
self.hdf.flush()
return h5_main
@staticmethod
def _parsefilepath(image_path):
def _parse_file_path(image_path):
"""
Returns a list of all files in the directory given by path
......
......@@ -11,9 +11,9 @@ from skimage.measure import block_reduce
from skimage.util import crop
from ..io_image import read_image, read_dm3, parse_dm4_parms
from .translator import Translator
from .utils import generateDummyMainParms, makePositionMat, getSpectralSlicing, \
getPositionSlicing
from ..hdf_utils import getH5DsetRefs, calc_chunks, linkformain
from .utils import generate_dummy_main_parms, make_position_mat, get_spectral_slicing, \
get_position_slicing, build_ind_val_dsets
from ..hdf_utils import getH5DsetRefs, calc_chunks, link_as_main
from ..io_hdf5 import ioHDF5
from ..microdata import MicroDataGroup, MicroDataset
from .. import dm4reader
......@@ -98,7 +98,7 @@ class OneViewTranslator(Translator):
Get the list of all files with the .tif extension and
the number of files in the list
'''
root_file_list, file_list = self._parsefilepath(image_path)
root_file_list, file_list = self._parse_file_path(image_path)
size, image_parms = self._getimageparms(file_list[0])
usize, vsize = size
......@@ -197,12 +197,12 @@ class OneViewTranslator(Translator):
'''
Build Spectroscopic and Position datasets for the image
'''
pos_mat = makePositionMat(image.shape)
pos_mat = make_position_mat(image.shape)
spec_mat = np.array([[0]], dtype=np.uint8)
ds_spec_inds = MicroDataset('Spectroscopic_Indices', spec_mat)
ds_spec_vals = MicroDataset('Spectroscopic_Values', spec_mat, dtype=np.float32)
spec_lab = getSpectralSlicing(['Image'])
spec_lab = get_spectral_slicing(['Image'])
ds_spec_inds.attrs['labels'] = spec_lab
ds_spec_inds.attrs['units'] = ''
ds_spec_vals.attrs['labels'] = spec_lab
......@@ -211,7 +211,7 @@ class OneViewTranslator(Translator):
ds_pos_inds = MicroDataset('Position_Indices', pos_mat)
ds_pos_vals = MicroDataset('Position_Values', pos_mat, dtype=np.float32)
pos_lab = getPositionSlicing(['X', 'Y'])
pos_lab = get_position_slicing(['X', 'Y'])
ds_pos_inds.attrs['labels'] = pos_lab
ds_pos_inds.attrs['units'] = ['pixel', 'pixel']
ds_pos_vals.attrs['labels'] = pos_lab
......@@ -231,7 +231,7 @@ class OneViewTranslator(Translator):
Link references to raw
'''
aux_ds_names = ['Position_Indices', 'Position_Values', 'Spectroscopic_Indices', 'Spectroscopic_Values']
linkformain(h5_image, *getH5DsetRefs(aux_ds_names, image_refs))
link_as_main(h5_image, *getH5DsetRefs(aux_ds_names, image_refs))
self.root_image_list.append(h5_image)
......@@ -351,7 +351,7 @@ class OneViewTranslator(Translator):
return ronc_mat3_mean.reshape(-1)
@staticmethod
def _parsefilepath(image_folder):
def _parse_file_path(image_folder):
"""
Returns a list of all files in the directory given by path
......@@ -441,7 +441,7 @@ class OneViewTranslator(Translator):
num_pixels = usize * vsize
num_files = scan_size_x * scan_size_y
root_parms = generateDummyMainParms()
root_parms = generate_dummy_main_parms()
root_parms['data_type'] = 'PtychographyData'
main_parms = {'num_images': num_files,
......@@ -460,10 +460,10 @@ class OneViewTranslator(Translator):
meas_grp.attrs = main_parms
chan_grp = MicroDataGroup('Channel_000')
# Get the Position and Spectroscopic Datasets
ds_spec_ind, ds_spec_vals = self._build_ind_val_dsets((usize, vsize), is_spectral=True,
labels=['U', 'V'], units=['pixel', 'pixel'])
ds_pos_ind, ds_pos_val = self._build_ind_val_dsets([scan_size_x, scan_size_y], is_spectral=False,
labels=['X', 'Y'], units=['pixel', 'pixel'])
ds_spec_ind, ds_spec_vals = build_ind_val_dsets((usize, vsize), is_spectral=True,
labels=['U', 'V'], units=['pixel', 'pixel'])
ds_pos_ind, ds_pos_val = build_ind_val_dsets([scan_size_x, scan_size_y], is_spectral=False,
labels=['X', 'Y'], units=['pixel', 'pixel'])
ds_chunking = calc_chunks([num_files, num_pixels],
data_type(0).itemsize,
......@@ -496,7 +496,7 @@ class OneViewTranslator(Translator):
'Spectroscopic_Indices',
'Spectroscopic_Values']
linkformain(h5_main, *getH5DsetRefs(aux_ds_names, h5_refs))
link_as_main(h5_main, *getH5DsetRefs(aux_ds_names, h5_refs))
self.hdf.flush()
......
......@@ -13,8 +13,8 @@ from skimage.data import imread
from skimage.measure import block_reduce
from ..io_image import read_image, read_dm3
from .translator import Translator
from .utils import generateDummyMainParms
from ..hdf_utils import getH5DsetRefs, calc_chunks, linkformain
from .utils import generate_dummy_main_parms, build_ind_val_dsets
from ..hdf_utils import getH5DsetRefs, calc_chunks, link_as_main
from ..io_hdf5 import ioHDF5
from ..microdata import MicroDataGroup, MicroDataset
......@@ -87,7 +87,7 @@ class PtychographyTranslator(Translator):
vsize = image_parms['SuperScan_Width']
data_type = images.dtype
else:
file_list = self._parsefilepath(image_path, image_type)
file_list = self._parse_file_path(image_path, image_type)
# Set up the basic parameters associated with this set of images
(usize, vsize), data_type = self._getimagesize(os.path.join(image_path, file_list[0]))
......@@ -192,7 +192,7 @@ class PtychographyTranslator(Translator):
# return ronc_mat3_mean.reshape(-1)
@staticmethod
def _parsefilepath(path, ftype='all'):
def _parse_file_path(path, ftype='all'):
"""
Returns a list of all files in the directory given by path
......@@ -287,7 +287,7 @@ class PtychographyTranslator(Translator):
num_pixels = usize*vsize
num_files = scan_size_x*scan_size_y
root_parms = generateDummyMainParms()
root_parms = generate_dummy_main_parms()
root_parms['data_type'] = 'PtychographyData'
main_parms = {'num_images': num_files,
......@@ -305,14 +305,14 @@ class PtychographyTranslator(Translator):
chan_grp = MicroDataGroup('Channel_000')
# Get the Position and Spectroscopic Datasets
# ds_spec_ind, ds_spec_vals = self._buildspectroscopicdatasets(usize, vsize, num_pixels)
ds_spec_ind, ds_spec_vals = self._build_ind_val_dsets((usize, vsize),
is_spectral=True,
labels=['U', 'V'],
units=['pixel', 'pixel'])
ds_pos_ind, ds_pos_val = self._build_ind_val_dsets([scan_size_x, scan_size_y],
is_spectral=False,
labels=['X', 'Y'],
units=['pixel', 'pixel'])
ds_spec_ind, ds_spec_vals = build_ind_val_dsets((usize, vsize),
is_spectral=True,
labels=['U', 'V'],
units=['pixel', 'pixel'])
ds_pos_ind, ds_pos_val = build_ind_val_dsets([scan_size_x, scan_size_y],
is_spectral=False,
labels=['X', 'Y'],
units=['pixel', 'pixel'])
ds_chunking = calc_chunks([num_files, num_pixels],
data_type(0).itemsize,
......@@ -345,7 +345,7 @@ class PtychographyTranslator(Translator):
'Spectroscopic_Indices',
'Spectroscopic_Values']
linkformain(h5_main, *getH5DsetRefs(aux_ds_names, h5_refs))
link_as_main(h5_main, *getH5DsetRefs(aux_ds_names, h5_refs))
self.hdf.flush()
......
......@@ -14,7 +14,7 @@ import numpy as np; # For array operations
from scipy.io.matlab import loadmat # To load parameters stored in Matlab .mat file
from .translator import Translator # Because this class extends the abstract Translator class
from .utils import makePositionMat, getPositionSlicing, generateDummyMainParms
from .utils import make_position_mat, get_position_slicing, generate_dummy_main_parms
from ..hdf_utils import getH5DsetRefs, linkRefs
from ..io_hdf5 import