Commit d8e10f50 authored by Unknown

Docstring and PEP8 cleanup

parent 1b0a093d
......@@ -30,31 +30,12 @@ from ..io.microdata import MicroDataset, MicroDataGroup
'''
Custom dtypes for the datasets created during fitting.
'''
# loop_metrics32 = np.dtype([('Area', np.float32),
# ('Centroid x', np.float32),
# ('Centroid y', np.float32),
# ('Rotation Angle [rad]', np.float32),
# ('Offset', np.float32)])
loop_metrics32 = np.dtype({'names': ['Area', 'Centroid x', 'Centroid y', 'Rotation Angle [rad]', 'Offset'],
'formats': [np.float32, np.float32, np.float32, np.float32, np.float32]})
# crit32 = np.dtype([('AIC_loop', np.float32),
# ('BIC_loop', np.float32),
# ('AIC_line', np.float32),
# ('BIC_line', np.float32)])
crit32 = np.dtype({'names': ['AIC_loop', 'BIC_loop', 'AIC_line', 'BIC_line'],
'formats': [np.float32, np.float32, np.float32, np.float32]})
# loop_fit32 = np.dtype([('a_0', np.float32),
# ('a_1', np.float32),
# ('a_2', np.float32),
# ('a_3', np.float32),
# ('a_4', np.float32),
# ('b_0', np.float32),
# ('b_1', np.float32),
# ('b_2', np.float32),
# ('b_3', np.float32),
# ('R2 Criterion', np.float32)])
field_names = ['a_0', 'a_1', 'a_2', 'a_3', 'a_4', 'b_0', 'b_1', 'b_2', 'b_3', 'R2 Criterion']
loop_fit32 = np.dtype({'names': field_names,
'formats': [np.float32 for name in field_names]})
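For readers unfamiliar with dict-style structured dtypes, here is a minimal standalone sketch showing how an array built from `loop_fit32` can be allocated and addressed by field name; the record count and values are purely illustrative.

```python
import numpy as np

# Same structured dtype as defined above, repeated so the sketch runs on its own.
field_names = ['a_0', 'a_1', 'a_2', 'a_3', 'a_4', 'b_0', 'b_1', 'b_2', 'b_3', 'R2 Criterion']
loop_fit32 = np.dtype({'names': field_names,
                       'formats': [np.float32] * len(field_names)})

# One record per position; 10 positions chosen arbitrarily for illustration.
loop_fits = np.zeros(10, dtype=loop_fit32)
loop_fits['a_0'] = 1.5                 # broadcast a coefficient to every record
loop_fits[0]['R2 Criterion'] = 0.98    # individual fields are addressed by name
print(loop_fits.dtype.names)
```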
......@@ -104,6 +85,7 @@ class BELoopModel(Model):
self._sho_all_but_dc_forc_inds = None
self._met_all_but_forc_inds = None
self._current_forc = 0
self._maxDataChunk = 1
def _is_legal(self, h5_main, variables=['DC_Offset']):
"""
......
......@@ -50,6 +50,8 @@ class BESHOmodel(Model):
self.is_reshapable = True
self.num_udvs_steps = None
self.freq_vec = None
self._maxDataChunk = 1
self._max_pos_per_read = 1
def _create_guess_datasets(self):
"""
......
......@@ -27,8 +27,13 @@ class Fit_Methods(object):
Generates the Simple Harmonic Oscillator response over the given frequency vector
Parameters
-----------
----------
guess : array-like
The set of guess parameters to be tested
data_vec : numpy.ndarray
The data vector to compare the current guess against
freq_vector : numpy.ndarray
The frequencies that correspond to each data point in `data_vec`
args : list or tuple
SHO parameters (Amp, w0, Q, phi, vector), where vector is a 1D numpy array of frequency values,
Amp is the amplitude, w0 the resonance frequency, Q the quality factor, and phi the phase.
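A minimal sketch of the 1 - R² fitness pattern this docstring describes, assuming a hypothetical `sho_response` helper in place of the packaged SHO function (which is not shown in this diff); the complex Lorentzian below is the conventional damped-oscillator form.

```python
import numpy as np

def sho_response(freq_vector, amp, w_0, q_factor, phi):
    # Hypothetical complex SHO (Lorentzian) response; stands in for the package's own function.
    return amp * np.exp(1j * phi) * w_0 ** 2 / (freq_vector ** 2 - 1j * freq_vector * w_0 / q_factor - w_0 ** 2)

def sho_fitness(guess, data_vec, freq_vector):
    # 1 - R^2 between the measured response and the response generated from the guess.
    func = sho_response(freq_vector, *guess)
    ss_tot = np.sum(np.abs(data_vec - np.mean(data_vec)) ** 2)
    ss_res = np.sum(np.abs(data_vec - func) ** 2)
    r_squared = 1 - ss_res / ss_tot if ss_tot > 0 else 0
    return 1 - r_squared
```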
......@@ -70,12 +75,16 @@ class BE_Fit_Methods(object):
Parameters
----------
coef_vec : numpy.ndarray
The set of loop-fit coefficients being evaluated
data_vec : numpy.ndarray
The measured loop data that the fitted loop is compared against
dc_vec : numpy.ndarray
The DC offset vector
args : list
Returns
-------
fitness : float
The 1-r^2 value for the current set of loop coefficients
"""
......@@ -94,38 +103,6 @@ class BE_Fit_Methods(object):
return 1 - r_squared
# @staticmethod
# def BE_LOOP(dc_vec, *args):
# """
#
# Parameters
# ----------
# dc_vec : numpy.ndarray
# The DC offset vector
# args : list
#
# Returns
# -------
#
# """
# def loop_func(coef_vec, data_vec):
# if coef_vec.size < 9:
# raise ValueError('Error: The Loop Fit requires 9 parameter guesses!')
#
# data_mean = np.mean(data_vec)
#
# func = loop_fit_function(dc_vec, coef_vec)
#
# ss_tot = sum(abs(data_vec - data_mean) ** 2)
# ss_res = sum(abs(data_vec - func) ** 2)
#
# r_squared = 1 - ss_res / ss_tot if ss_tot > 0 else 0
#
# return 1 - r_squared
#
# return loop_func
class forc_iv_fit_methods(Fit_Methods):
"""
Any fitting methods specific to FORC_IV should go here.
......
......@@ -58,6 +58,10 @@ class Optimize(object):
warn('Error: data and guess must be numpy.ndarray. Exiting...')
sys.exit()
self._parallel = parallel
self.strategy = None
self.options = None
self.solver_type = None
self.solver_options = None
def _guessFunc(self):
gm = GuessMethods()
......
......@@ -30,10 +30,12 @@ class BEodfTranslator(Translator):
def __init__(self, *args, **kwargs):
super(BEodfTranslator, self).__init__(*args, **kwargs)
self.hdf = None
self.h5_raw = None
self.num_rand_spectra = kwargs.pop('num_rand_spectra', 1000)
self.FFT_BE_wave = None
self.signal_type = None
self.expt_type = None
def translate(self, file_path, show_plots=True, save_plots=True, do_histogram=False, verbose=False):
"""
......
......@@ -31,6 +31,14 @@ class BEodfRelaxationTranslator(Translator):
It will not work for in-field data. This should be fixed at a later date.
"""
def __init__(self, max_mem_mb=1024):
super(BEodfRelaxationTranslator, self).__init__(max_mem_mb)
self.FFT_BE_wave = None
self.hdf = None
self.ds_main = None
self.mean_resp = None
self.max_resp = None
self.min_resp = None
def translate(self, file_path, show_plots=True, save_plots=True, do_histogram=False):
"""
......@@ -147,7 +155,7 @@ class BEodfRelaxationTranslator(Translator):
num_actual_udvs_steps = int(num_actual_udvs_steps)
stind = 0
for step_index in xrange(UDVS_mat.shape[0]):
for step_index in range(UDVS_mat.shape[0]):
if UDVS_mat[step_index, 2] < 1E-3: # invalid AC amplitude
continue # skip
spec_inds[0, stind:stind + bins_per_step] = np.arange(bins_per_step, dtype=np.uint32) # Bin step
......
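To make the spectroscopic-index bookkeeping above easier to follow, here is a standalone sketch of how such an index table is typically assembled. Only the bin-index row appears in the hunk above, so the second (step-index) row is an assumption about the remainder of the loop.

```python
import numpy as np

bins_per_step, num_steps = 4, 3            # illustrative sizes
spec_inds = np.zeros((2, bins_per_step * num_steps), dtype=np.uint32)
for step in range(num_steps):
    sl = slice(step * bins_per_step, (step + 1) * bins_per_step)
    spec_inds[0, sl] = np.arange(bins_per_step, dtype=np.uint32)  # bin index within the step
    spec_inds[1, sl] = step                                       # assumed: UDVS step index
print(spec_inds)
```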
......@@ -55,6 +55,12 @@ class FakeBEPSGenerator(Translator):
self.n_spec_bins = None
self.n_fields = None
self.binning_func = no_bin
self.cycle_fraction = None
self.h5_path = None
self.image_ext = None
self.rebin = None
self.bin_factor = None
self.bin_func = None
def _read_data(self, folder):
"""
......
......@@ -30,6 +30,28 @@ class BEPSndfTranslator(Translator):
files to .h5
"""
def __init__(self, *args, **kwargs):
super(BEPSndfTranslator, self).__init__(*args, **kwargs)
self.debug = False
self.parm_dict = dict()
self.field_mode = None
self.spec_label = None
self.halve_udvs_steps = None
self.BE_wave = None
self.BE_wave_rev = None
self.BE_bin_inds = None
self.udvs_mat = None
self.udvs_labs = None
self.udvs_units = None
self.num_udvs_steps = None
self.excit_type_vec = None
self.__unique_waves__ = None
self.__num_wave_types__ = None
self.max_pixels = None
self.pos_labels = None
self.pos_mat = None
self.pos_units = None
self.hdf = None
def translate(self, data_filepath, show_plots=True, save_plots=True, do_histogram=False, debug=False):
"""
......
......@@ -1184,12 +1184,21 @@ BEHistogram Class and Functions
class BEHistogram:
# TODO: Turn into proper class
# TODO: Parallelize Histogram generation
# TODO: Make into Process class
"""
This class just functions as a container so we can have shared objects
Chris Smith -- csmith55@utk.edu
"""
def __init__(self):
self.max_mem = None
self.max_response = None
self.min_response = None
self.num_udvs_steps = 1
self.N_spectral_steps = 1
self.N_bins = 1
self.N_freqs = 1
self.N_pixels = 1
self.N_y_bins = 1
def addBEHist(self, h5_path, max_mem_mb=1024, show_plot=True, save_plot=True):
"""
......
......@@ -170,7 +170,7 @@ class Grid(NanonisFile):
def __init__(self, fname):
_is_valid_file(fname, ext='3ds')
super().__init__(fname)
super(Grid, self).__init__(fname)
self.header = _parse_3ds_header(self.header_raw)
self.signals = self._load_data()
self.signals['sweep_signal'] = self._derive_sweep_signal()
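A small, hypothetical helper showing how the attributes that `Grid.__init__` sets up might be inspected after loading; `describe_grid` is not part of the package and assumes only the attributes visible above (`header`, `signals`, and the derived `'sweep_signal'` entry).

```python
def describe_grid(grid):
    # `grid` is an instance of the Grid class above.
    print(grid.header)                      # fields parsed from the .3ds header
    print(sorted(grid.signals.keys()))      # loaded channels plus the derived 'sweep_signal'
    return grid.signals['sweep_signal']
```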
......@@ -296,7 +296,7 @@ class Scan(NanonisFile):
def __init__(self, fname):
_is_valid_file(fname, ext='sxm')
super().__init__(fname)
super(Scan, self).__init__(fname)
self.header = _parse_sxm_header(self.header_raw)
# data begins with 4 byte code, add 4 bytes to offset instead
......@@ -368,7 +368,7 @@ class Spec(NanonisFile):
def __init__(self, fname):
_is_valid_file(fname, ext='dat')
super().__init__(fname)
super(Spec, self).__init__(fname)
self.header = _parse_dat_header(self.header_raw)
self.signals = self._load_data()
......
......@@ -21,6 +21,9 @@ class ForcIVTranslator(Translator):
"""
Translates FORC IV datasets from .mat files to .h5
"""
def __init__(self, *args, **kwargs):
super(ForcIVTranslator, self).__init__(*args, **kwargs)
self.h5_read = None
def _read_data(self):
pass
......
......@@ -24,6 +24,9 @@ class GIVTranslator(Translator):
"""
Translates G-mode Fast IV datasets from .mat files to .h5
"""
def __init__(self, *args, **kwargs):
super(GIVTranslator, self).__init__(*args, **kwargs)
self.raw_datasets = None
def _parse_file_path(self, input_path):
pass
......
......@@ -24,7 +24,12 @@ class GLineTranslator(Translator):
"""
Translates G-mode line (bigtimedata.dat) files from actual BE line experiments to HDF5
"""
def __init__(self, *args, **kwargs):
super(GLineTranslator, self).__init__(*args, **kwargs)
self.points_per_pixel = 1
self.num_rows = 1
self.__bytes_per_row__ = 1
def translate(self, file_path):
"""
The main function that translates the provided file into a .h5 file
......
......@@ -27,6 +27,9 @@ class GTuneTranslator(GLineTranslator):
Translates G-mode Tune (bigtimedata.dat) files from actual BE line experiments to HDF5
"""
def __init__(self, *args, **kwargs):
super(GTuneTranslator, self).__init__(*args, **kwargs)
def translate(self, file_path):
"""
The main function that translates the provided file into a .h5 file
......
......@@ -31,7 +31,7 @@ class LabViewH5Patcher(Translator):
def _read_data(self):
pass
def translate(self, h5_path, force_patch=False):
def translate(self, h5_path, force_patch=False, **kwargs):
"""
Add the needed references and attributes to the h5 file that are not created by the
LabView data acquisition program.
......
......@@ -23,6 +23,9 @@ class TRKPFMTranslator(Translator):
"""
Translates trKPFM datasets from .mat and .dat files to .h5
"""
def __init__(self, *args, **kwargs):
super(TRKPFMTranslator, self).__init__(*args, **kwargs)
self.raw_datasets = None
def _parse_file_path(self, input_path):
folder_path, base_name = path.split(input_path)
......
......@@ -108,7 +108,7 @@ class FeatureExtractorParallel(object):
Parameters
----------
processors : int, optional
processes : int, optional
Number of processors to use, default = 1.
mask : boolean, optional, default False.
Whether to use
......
......@@ -66,6 +66,9 @@ class GIVBayesian(Process):
self.roll_pts = int(self.single_ao.size * roll_cyc_fract)
self.rolled_bias = np.roll(self.single_ao, self.roll_pts)
self.reverse_results = None
self.forward_results = None
def _set_memory_and_cores(self, cores=1, mem=1024):
"""
Checks hardware limitations such as memory, # cpus and sets the recommended datachunk sizes and the
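The `rolled_bias` line above shifts the single-cycle bias waveform by a fraction of a cycle. A standalone sketch of that operation, with an illustrative sinusoidal cycle standing in for `self.single_ao`:

```python
import numpy as np

single_ao = np.sin(np.linspace(0, 2 * np.pi, 128, endpoint=False))  # one illustrative bias cycle
roll_cyc_fract = 0.25                                # shift by a quarter of a cycle
roll_pts = int(single_ao.size * roll_cyc_fract)      # 32 points here
rolled_bias = np.roll(single_ao, roll_pts)

# np.roll advances the waveform by roll_pts samples, wrapping the end around to the front.
assert np.allclose(rolled_bias[roll_pts:], single_ao[:-roll_pts])
```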
......@@ -238,7 +241,6 @@ class GIVBayesian(Process):
Parameters
----------
None
Returns
-------
......
......@@ -20,12 +20,8 @@ from ..io.io_hdf5 import ioHDF5
from ..io.io_utils import getAvailableMem
from ..io.microdata import MicroDataGroup, MicroDataset
from ..io.translators.utils import get_position_slicing, make_position_mat, get_spectral_slicing
from .svd_utils import _get_component_slice
from .svd_utils import get_component_slice
# windata32 = np.dtype([('Image Data', np.float32)])
# absfft32 = np.dtype([('FFT Magnitude', np.float32)])
# winabsfft32 = np.dtype([('Image Data', np.float32), ('FFT Magnitude', np.float32)])
# wincompfft32 = np.dtype([('Image Data', np.float32), ('FFT Real', np.float32), ('FFT Imag', np.float32)])
windata32 = np.dtype({'names': ['Image Data'],
'formats': [np.float32]})
absfft32 = np.dtype({'names': ['FFT Magnitude'],
......@@ -799,7 +795,7 @@ class ImageWindow(object):
print('Cleaning the image by removing unwanted components.')
comp_slice = _get_component_slice(components)
comp_slice = get_component_slice(components)
'''
Read the 1st n_comp components from the SVD results
......@@ -970,7 +966,7 @@ class ImageWindow(object):
return
print('Cleaning the image by removing unwanted components.')
comp_slice = _get_component_slice(components)
comp_slice = get_component_slice(components)
'''
Read the 1st n_comp components from the SVD results
......
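The cleaning routines above convert the user-supplied `components` argument into a slice over the SVD results via `get_component_slice`. The real helper in `svd_utils` is not shown in this diff; the following is only a guess at the common int/iterable/None handling such a helper performs.

```python
import numpy as np

def component_slice_sketch(components):
    # Assumed behaviour: int -> first n components, iterable -> explicit indices, None -> everything.
    if components is None:
        return slice(None)
    if isinstance(components, int):
        return slice(0, components)
    return np.asarray(components, dtype=int)
```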
......@@ -785,7 +785,8 @@ class geoTransformerParallel(object):
Parameters
----------
input: h5py.dataset
dataset: h5py.dataset
The dataset to be corrected
"""
if not isinstance(dataset, h5py.Dataset):
......@@ -1048,7 +1049,7 @@ class geoTransformerParallel(object):
Parameters
----------
Processors: int, optional
processes: int, optional
Number of processors to use, default = 1.
Returns
......@@ -1197,7 +1198,7 @@ class geoTransformerSerial(object):
return matches, filt_matches
# TODO: Need Better Error Handling.
def findTransformation(self, transform, matches, processes, **kwargs):
def findTransformation(self, transform, matches, **kwargs):
"""
This method finds the optimal transformation between two images,
given matching features, using random sample consensus (RANSAC).
......@@ -1207,8 +1208,6 @@ class geoTransformerSerial(object):
transform : skimage.transform object
matches : list
matches found through match_features method.
processors : int
Number of processors to use.
**kwargs are passed to skimage.transform.ransac
Returns
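The docstring above notes that the extra keyword arguments are forwarded to RANSAC. For orientation, a hedged sketch of what that call usually looks like with scikit-image, where `ransac` actually lives in `skimage.measure`; the matched coordinates below are synthetic stand-ins, not output of `match_features`.

```python
import numpy as np
from skimage.measure import ransac
from skimage.transform import AffineTransform

# Synthetic matched keypoints: 50 (x, y) pairs related by a pure translation.
src = np.random.rand(50, 2) * 100
dst = src + np.array([5.0, -3.0])

model, inliers = ransac((src, dst), AffineTransform,
                        min_samples=3, residual_threshold=2, max_trials=1000)
print(model.translation, inliers.sum())
```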
......@@ -1351,7 +1350,7 @@ class geoTransformerSerial(object):
Parameters
----------
Processors: int, optional
processes: int, optional
Number of processors to use, default = 1.
Returns
......