Commit e85c5fe8 authored by Chris Smith

Merge branch 'master' into cades_dev

# Conflicts:
#	pycroscopy/io/hdf_utils.py
#	pycroscopy/io/translators/beps_data_generator.py
parents 0c161fef 4285d730
......@@ -39,9 +39,20 @@ Short tutorials on how to use pycroscopy
Longer examples (probably scientific workflows / pipelines)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Analysis
* How to use Optimize
* Visualization
Done:
* Data formatting in pycroscopy
* How to write a Translator
* How to write (back) to H5
* Spectral Unmixing with pycroscopy
* Basic introduction to loading data in pycroscopy
Pending:
* Handling multidimensional (6D) datasets - work in progress
* Visualizing data (interactively using widgets) - yet to begin
* How to write your own parallel computing function using the (yet to be written) process module
Rama's tutorial goal
~~~~~~~~~~~~~~~~~~~~
......@@ -49,7 +60,7 @@ Rama's tutorial goal
2. Continuing above, determine the average of the quality factor coming from cycles 1,3,4 for spatial points stored in vector b for the on-field part for a predetermined voltage range given by endpoints [e,f]. Compare the results with the SHO guess and fit for the quality factor.
3. After opening an h5 file containing results from a relaxation experiment, plot the response at a particular point and voltage, run exponential fitting and then store the results of the fit in the same h5 file using iohdf and/or numpy translators.
4. Take a FORC IV ESM dataset and break it up into forward and reverse branches, along with positive and negative branches. Do correlation analysis between PFM and IV for different branches and store the results in the file, and readily access them for plotting again.
5. A guide to using the model fitter for parallel fitting of numpy array-style datasets. This one can be merged with number 3.
5. A guide to using the model fitter for parallel fitting of numpy array-style datasets. This one can be merged with number 3; a sketch of the general pattern follows below.
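A minimal sketch of the parallel-fitting pattern for item 5, assuming nothing about the eventual pycroscopy API; fit_one and the quadratic model are illustrative stand-ins:

import numpy as np
from multiprocessing import Pool

def fit_one(vector):
    # stand-in for a per-position parametric fit (here: a quadratic in the step index)
    return np.polyfit(np.arange(vector.size), vector, 2)

if __name__ == '__main__':
    data = np.random.rand(512, 128)      # positions x spectral steps
    with Pool(processes=4) as pool:
        fits = pool.map(fit_one, data)   # one coefficient vector per position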
Documentation
-------------
......@@ -102,16 +113,23 @@ We have two kinds of large computational jobs and one kind of large I/O job:
* Computation
1. Machine learning and Statistics
1.1. Either use custom algorithms developed for BEAM
1.1. Use custom algorithms developed for BEAM
* Advantage - Optimized (and tested) for various HPC environments
* Disadvantages:
* Need to integrate non-Python code
* We only have a handful of these; NOT future-compatible
1.2. Or continue using a single FAT node for these jobs
1.2. OR continue using a single FAT node for these jobs
* Advantages:
* No optimization required
* Continue using the same scikit learn packages
* Disadvantage - Is not optimized for HPC
1.3. OR use pbdR / write pbdPy (wrappers around pbdR)
* Advantages:
* Already optimized / mature project
* In-house project (good support)
* Disadvantages:
* Dependent on pbdR for implementing new algorithms
2. Parallel parametric search - analyze subpackage and some user-defined functions in processing. Can be extended using:
* Dask - A drop-in replacement for multiprocessing that will work on laptops and clusters. More elegant and easier to write and maintain than MPI, at the cost of some efficiency; see the sketch below.
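A minimal sketch of the Dask version of that idea, assuming dask is installed; the fit function and array shapes are illustrative:

import numpy as np
import dask
from dask import delayed

def fit_one(vector):
    # stand-in for a user-defined per-position fit
    return np.polyfit(np.arange(vector.size), vector, 2)

data = np.random.rand(512, 128)                  # positions x spectral steps
tasks = [delayed(fit_one)(row) for row in data]
# scheduler='processes' works on a laptop; pointing a dask.distributed Client
# at the same graph moves the computation to a cluster unchanged
fits = dask.compute(*tasks, scheduler='processes')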
......
"""
An Example on writing examples
==============================
The example docstring will be printed as text along with the code of the example.
It should contain a description of the example code.
Only examples whose file names begin with plot_* will be run when generating the docs.
"""
# Code source: Chris Smith
# License: MIT
print('I am an example')
version = '0.0.53'
date = '8/14/2017'
time = '9:41:32'
\ No newline at end of file
time = '9:41:32'
......@@ -59,6 +59,7 @@ field_names = ['a_0', 'a_1', 'a_2', 'a_3', 'a_4', 'b_0', 'b_1', 'b_2', 'b_3', 'R
loop_fit32 = np.dtype({'names': field_names,
'formats': [np.float32 for name in field_names]})
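An aside on the compound dtype above: record arrays built from it can be indexed by field name, which is how fit results are stored and read back. A small illustration with abbreviated fields (loop_fit32_demo is a stand-in, not the module's dtype):

import numpy as np

names = ['a_0', 'a_1', 'R2 Criterion']
loop_fit32_demo = np.dtype({'names': names, 'formats': [np.float32] * len(names)})
fits = np.zeros(3, dtype=loop_fit32_demo)
fits['R2 Criterion'] = 0.99    # assign or read a whole column by field name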
class BELoopModel(Model):
"""
Analysis of Band excitation loops using functional fits
......@@ -425,7 +426,7 @@ class BELoopModel(Model):
h5_loop_parm : h5py.Dataset
Dataset of physical parameters
"""
dset_name = h5_loop_fit.name+'_Loop_Parameters'
dset_name = h5_loop_fit.name + '_Loop_Parameters'
h5_loop_parameters = create_empty_dataset(h5_loop_fit, dtype=switching32,
dset_name=dset_name,
new_attrs={'nuc_threshold': nuc_threshold})
......@@ -894,7 +895,7 @@ class BELoopModel(Model):
self.sho_spec_inds_per_forc * (self._current_forc + 1))
self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))
self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]
elif self._current_forc < self._num_forcs-1:
elif self._current_forc < self._num_forcs - 1:
# Reset for next FORC
self._current_forc += 1
......@@ -927,7 +928,7 @@ class BELoopModel(Model):
self.sho_spec_inds_per_forc * (self._current_forc + 1))
self._end_pos = int(min(self.h5_projected_loops.shape[0], self._start_pos + self.max_pos))
self.data = self.h5_projected_loops[self._start_pos:self._end_pos, self._current_sho_spec_slice]
elif self._current_forc < self._num_forcs-1:
elif self._current_forc < self._num_forcs - 1:
# Reset for next FORC
self._current_forc += 1
......@@ -945,7 +946,7 @@ class BELoopModel(Model):
self.data = None
guess = self.h5_guess[self._start_pos:self._end_pos,
self._current_met_spec_slice].reshape([-1, 1])
self._current_met_spec_slice].reshape([-1, 1])
self.guess = compound_to_scalar(guess)[:, :-1]
def _create_guess_datasets(self):
......@@ -1052,7 +1053,7 @@ class BELoopModel(Model):
pix_inds = np.where(labels == clust_id)[0]
temp = np.atleast_2d(loop_fit_results[clust_id][0].x)
# convert to the appropriate dtype as well:
r2 = 1-np.sum(np.abs(loop_fit_results[clust_id][0].fun**2))
r2 = 1 - np.sum(np.abs(loop_fit_results[clust_id][0].fun ** 2))
guess_parms[pix_inds] = realToCompound(np.hstack([temp, np.atleast_2d(r2)]), loop_fit32)
return guess_parms
......@@ -1126,6 +1127,7 @@ class LoopOptimize(Optimize):
"""
Subclass of Optimize with BE Loop specific changes
"""
def _initiateSolverAndObjFunc(self):
if self.solver_type in scipy.optimize.__dict__.keys():
solver = scipy.optimize.__dict__[self.solver_type]
......
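The lookup in _initiateSolverAndObjFunc above resolves a scipy solver by name from the scipy.optimize namespace; the same idea in isolation (getattr is the more idiomatic spelling of the same thing):

import scipy.optimize

solver_type = 'least_squares'                 # any name published by scipy.optimize
if solver_type in scipy.optimize.__dict__.keys():
    solver = scipy.optimize.__dict__[solver_type]
    # equivalently: solver = getattr(scipy.optimize, solver_type)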
......@@ -281,7 +281,8 @@ class BESHOmodel(Model):
Default None, output of psutil.cpu_count - 2 is used
strategy: string
Default is 'Wavelet_Peaks'.
Can be one of ['wavelet_peaks', 'relative_maximum', 'gaussian_processes']. For updated list, run GuessMethods.methods
Can be one of ['wavelet_peaks', 'relative_maximum', 'gaussian_processes']. For updated list, run
GuessMethods.methods
options: dict
Default options for wavelet_peaks: {"peak_widths": np.array([10, 200]), "peak_step": 20}.
Dictionary of options passed to strategy. For more info see GuessMethods documentation.
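The wavelet_peaks strategy is wavelet-based peak finding at heart; a self-contained sketch of the underlying idea using scipy directly, where the widths array plays the role of "peak_widths" (an illustration, not the GuessMethods implementation):

import numpy as np
from scipy.signal import find_peaks_cwt

vector = np.exp(-np.linspace(-3, 3, 256) ** 2)              # one synthetic resonance
peak_inds = find_peaks_cwt(vector, np.arange(10, 200, 20))  # candidate peak indices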
......@@ -428,7 +429,8 @@ class BESHOmodel(Model):
sho_vec['Frequency [Hz]'] = self.freq_vec[peak_inds] # Frequency
sho_vec['Quality Factor'] = np.ones_like(comp_vals) * 10 # Quality factor
# Add something here for the R^2
sho_vec['R2 Criterion'] = np.array([self.r_square(self.data, self._sho_func, self.freq_vec, sho_parms) for sho_parms in sho_vec])
sho_vec['R2 Criterion'] = np.array([self.r_square(self.data, self._sho_func, self.freq_vec, sho_parms)
for sho_parms in sho_vec])
elif strategy in ['complex_gaussian']:
for iresult, result in enumerate(results):
sho_vec['Amplitude [V]'][iresult] = result[0]
......
......@@ -21,6 +21,7 @@ class GuessMethods(object):
input and return the guess parameters. The guess methods here use the keyword arguments to configure the returned
function.
"""
def __init__(self):
self.methods = ['wavelet_peaks', 'relative_maximum', 'gaussian_processes', 'complex_gaussian']
......@@ -72,7 +73,7 @@ class GuessMethods(object):
warn('Error: Please specify "peak_widths" kwarg to use this method')
@staticmethod
def absolute_maximum( *args, **kwargs):
def absolute_maximum(*args, **kwargs):
"""
Finds maximum in 1d-array
Parameters
......@@ -84,9 +85,11 @@ class GuessMethods(object):
-------
fastpeak: callable function
"""
def fastpeak(vector):
vec_max = np.argmax(vector)
return vec_max
return fastpeak
@staticmethod
......@@ -184,4 +187,4 @@ def r_square(data_vec, func, *args, **kwargs):
r_squared = 1 - ss_res / ss_tot if ss_tot > 0 else 0
return r_squared
\ No newline at end of file
return r_squared
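For reference, r_square above returns the ordinary coefficient of determination, clamped to 0 when the data carry no variance; the same computation with the model evaluation inlined (r_square_flat is a hypothetical name):

import numpy as np

def r_square_flat(data_vec, fit_vec):
    # R^2 = 1 - SS_res / SS_tot; 0 when SS_tot is not positive
    ss_res = np.sum((np.ravel(data_vec) - np.ravel(fit_vec)) ** 2)
    ss_tot = np.sum((np.ravel(data_vec) - np.mean(data_vec)) ** 2)
    return 1 - ss_res / ss_tot if ss_tot > 0 else 0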
......@@ -37,10 +37,11 @@ class Model(object):
None
"""
def __init__(self, h5_main, variables=['Frequency'], parallel=True):
"""
For now, we assume that the guess dataset has not been generated for this dataset but we will relax this requirement
after testing the basic components.
For now, we assume that the guess dataset has not been generated for this dataset but we will relax this
requirement after testing the basic components.
"""
# Checking if dataset is "Main"
......@@ -90,7 +91,7 @@ class Model(object):
if self._maxCpus == 1:
self._parallel = False
self._maxMemoryMB = getAvailableMem() / 1024**2 # in MB
self._maxMemoryMB = getAvailableMem() / 1024 ** 2  # in MB
self._maxDataChunk = int(self._maxMemoryMB / self._maxCpus)
......@@ -161,7 +162,6 @@ class Model(object):
print('Finished reading all data!')
self.data = None
def _get_guess_chunk(self):
"""
Returns a chunk of guess dataset corresponding to the main dataset.
......@@ -252,11 +252,11 @@ class Model(object):
"""
warn('Please override the _create_fit_datasets specific to your model')
self.fit = None # replace with actual h5 dataset
self.fit = None # replace with actual h5 dataset
pass
def do_guess(self, processors=None, strategy='wavelet_peaks',
options={"peak_widths": np.array([10, 200]), "peak_step":20}):
options={"peak_widths": np.array([10, 200]), "peak_step": 20}):
"""
Parameters
......
......@@ -77,7 +77,8 @@ class Optimize(object):
Number of logical cores to use for computing
strategy: string
Default is 'Wavelet_Peaks'.
Can be one of ['wavelet_peaks', 'relative_maximum', 'gaussian_processes']. For updated list, run GuessMethods.methods
Can be one of ['wavelet_peaks', 'relative_maximum', 'gaussian_processes']. For updated list,
run GuessMethods.methods
options: dict
Default options for wavelet_peaks: {"peak_widths": np.array([10, 200]), "peak_step": 20}.
Dictionary of options passed to strategy. For more info see GuessMethods documentation.
......
from unittest import TestCase
class Test(TestCase):
def test_this_module(self):
pass
......@@ -34,6 +34,7 @@ atom_dtype = np.dtype({'names': ['x', 'y', 'type'],
atom_coeff_dtype = np.dtype({'names': ['Amplitude', 'x', 'y', 'Sigma'],
'formats': [np.float32, np.float32, np.float32, np.float32]})
def multi_gauss_surface_fit(coef_mat, s_mat):
"""
Evaluates the provided coefficients for N gaussian peaks to generate a 2D matrix
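For orientation, one symmetric 2D gaussian on a grid looks like the sketch below; multi_gauss_surface_fit sums N such peaks, one per row of coef_mat (the helper here is illustrative, not the library function):

import numpy as np

def gauss2d(xx, yy, amp, x0, y0, sigma):
    # single symmetric 2D gaussian evaluated on coordinate grids xx, yy
    return amp * np.exp(-((xx - x0) ** 2 + (yy - y0) ** 2) / (2 * sigma ** 2))

yy, xx = np.mgrid[0:64, 0:64]
surface = gauss2d(xx, yy, amp=1.0, x0=32.0, y0=32.0, sigma=3.0)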
......@@ -233,7 +234,7 @@ def fit_atom_positions_parallel(parm_dict, fitting_parms, num_cores=None):
all_atom_guesses = parm_dict['atom_pos_guess']
t_start = tm.time()
num_cores = recommendCores(all_atom_guesses.shape[0], requested_cores=num_cores, lengthy_computation=False)
if num_cores>1:
if num_cores > 1:
pool = mp.Pool(processes=num_cores)
parm_list = itt.izip(range(all_atom_guesses.shape[0]), itt.repeat(parm_dict), itt.repeat(fitting_parms))
chunk = int(all_atom_guesses.shape[0] / num_cores)
......@@ -316,7 +317,7 @@ def fit_atom_positions_dset(h5_grp, fitting_parms=None, num_cores=None):
fitting_results = fit_atom_positions_parallel(parm_dict, fitting_parms, num_cores=num_cores)
# Make datasets to write back to file:
guess_parms = np.zeros(shape=(num_atoms, num_nearest_neighbors+1), dtype=atom_coeff_dtype)
guess_parms = np.zeros(shape=(num_atoms, num_nearest_neighbors + 1), dtype=atom_coeff_dtype)
fit_parms = np.zeros(shape=guess_parms.shape, dtype=guess_parms.dtype)
for atom_ind, single_atom_results in enumerate(fitting_results):
......@@ -381,7 +382,9 @@ def visualize_atom_fit(atom_rough_pos, all_atom_guesses, parm_dict, fitting_parm
atom_ind = np.argsort(temp_dist)[0]
parm_dict['verbose'] = True
coef_guess_mat, lb_mat, ub_mat, coef_fit_mat, fit_region, s_mat, plsq = fit_atom_pos((atom_ind, parm_dict, fitting_parms))
coef_guess_mat, lb_mat, ub_mat, coef_fit_mat, fit_region, s_mat, plsq = fit_atom_pos((atom_ind,
parm_dict,
fitting_parms))
print('\tAmplitude\tx position\ty position\tsigma')
print('-------------------GUESS---------------------')
......@@ -500,7 +503,7 @@ def remove_duplicate_labels(atom_labels, psf_width, double_cropped_image, distan
axis.imshow(double_cropped_image, interpolation='none', cmap="gray")
axis.scatter(all_atom_pos[culprits[:, 0], 1], all_atom_pos[culprits[:, 0], 0], color='yellow')
axis.scatter(all_atom_pos[culprits[:, 1], 1], all_atom_pos[culprits[:, 1], 0], color='red')
axis.scatter(all_atom_pos[good_atom_inds, 1], all_atom_pos[good_atom_inds, 0], color='cyan');
axis.scatter(all_atom_pos[good_atom_inds, 1], all_atom_pos[good_atom_inds, 0], color='cyan')
# Now classify the culprit pairs into the correct family
classifier = KNeighborsClassifier(n_neighbors=num_neighbors)
......@@ -531,9 +534,9 @@ def remove_duplicate_labels(atom_labels, psf_width, double_cropped_image, distan
row_ind = int(np.round(all_atom_pos[atom_ind, 0]))
col_ind = int(np.round(all_atom_pos[atom_ind, 1]))
img_section = double_cropped_image[max(0, row_ind - neighbor_size):
min(double_cropped_image.shape[0], row_ind + neighbor_size),
max(0, col_ind - neighbor_size):
min(double_cropped_image.shape[1], col_ind + neighbor_size)]
min(double_cropped_image.shape[0], row_ind + neighbor_size),
max(0, col_ind - neighbor_size):
min(double_cropped_image.shape[1], col_ind + neighbor_size)]
amplitude_pair.append(np.max(img_section))
# print amplitude_pair
if amplitude_pair[0] > amplitude_pair[1]:
......
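The img_section slicing above is a bounds-clamped square crop around an atom position: clamping the start indices at 0 keeps negative slice indices (which numpy counts from the end) from silently wrapping around for atoms near the image edge. The same pattern in isolation, with illustrative names:

import numpy as np

def clamped_window(image, row_ind, col_ind, half_size):
    # clamp the crop to the image bounds on all four sides
    return image[max(0, row_ind - half_size):min(image.shape[0], row_ind + half_size),
                 max(0, col_ind - half_size):min(image.shape[1], col_ind + half_size)]

window = clamped_window(np.random.rand(128, 128), row_ind=3, col_ind=120, half_size=16)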
......@@ -24,6 +24,7 @@ from ...io.io_hdf5 import ioHDF5
from ...viz import plot_utils
from ..model import Model
def do_fit(single_parm):
parms = single_parm[0]
coef_guess_mat = parms[1]
......@@ -43,9 +44,9 @@ def do_fit(single_parm):
coef_fit_mat = np.reshape(plsq.x, (-1, 7))
#if verbose:
# if verbose:
# return coef_guess_mat, lb_mat, ub_mat, coef_fit_mat, fit_region, s_mat, plsq
#else:
# else:
return coef_fit_mat
......@@ -148,7 +149,8 @@ class Gauss_Fit(object):
Parameters used for atom position fitting
'fit_region_size': region to consider when fitting. Should be large enough to see the nearest neighbors.
'num_nearest_neighbors': the number of nearest neighbors to fit
'sigma_guess': starting guess for gaussian standard deviation. Should be about the size of an atom width in pixels.
'sigma_guess': starting guess for gaussian standard deviation. Should be about the size of an atom width in
pixels.
'position_range': range that the fitted position can move from initial guess position in pixels
'max_function_evals': maximum allowed function calls; passed to the least squares fitter
'fitting_tolerance': target difference between the fit and the data
......@@ -262,12 +264,14 @@ class Gauss_Fit(object):
parm_list = itt.izip(self.guess_parms, itt.repeat(self.fitting_parms))
self.fitting_results = [do_fit(parm) for parm in parm_list]
print ('Finalizing datasets...')
self.guess_dataset = np.zeros(shape=(self.num_atoms, self.num_nearest_neighbors + 1), dtype=self.atom_coeff_dtype)
print('Finalizing datasets...')
self.guess_dataset = np.zeros(shape=(self.num_atoms, self.num_nearest_neighbors + 1),
dtype=self.atom_coeff_dtype)
self.fit_dataset = np.zeros(shape=self.guess_dataset.shape, dtype=self.guess_dataset.dtype)
for atom_ind, single_atom_results in enumerate(self.fitting_results):
types = np.hstack((self.h5_guess['type'][atom_ind], [self.h5_guess['type'][neighbor] for neighbor in self.closest_neighbors_mat[atom_ind]]))
types = np.hstack((self.h5_guess['type'][atom_ind],
[self.h5_guess['type'][neighbor] for neighbor in self.closest_neighbors_mat[atom_ind]]))
atom_data = np.hstack((np.vstack(types), single_atom_results))
atom_data = [tuple(element) for element in atom_data]
self.fit_dataset[atom_ind] = atom_data
......@@ -418,21 +422,21 @@ class Gauss_Fit(object):
ub_background.append(item + item * movement_allowance)
# Set up upper and lower bounds:
lb_mat = [lb_a, # amplitude
coef_guess_mat[:, 1] - position_range, # x position
coef_guess_mat[:, 2] - position_range, # y position
lb_mat = [lb_a, # amplitude
coef_guess_mat[:, 1] - position_range, # x position
coef_guess_mat[:, 2] - position_range, # y position
[np.max([0, value - value * movement_allowance]) for value in coef_guess_mat[:, 3]], # sigma x
[np.max([0, value - value * movement_allowance]) for value in coef_guess_mat[:, 4]], # sigma y
coef_guess_mat[:, 5] - 2 * 3.14159, # theta
lb_background] # background
coef_guess_mat[:, 5] - 2 * 3.14159, # theta
lb_background] # background
ub_mat = [ub_a, # amplitude
coef_guess_mat[:, 1] + position_range, # x position
coef_guess_mat[:, 2] + position_range, # y position
ub_mat = [ub_a, # amplitude
coef_guess_mat[:, 1] + position_range, # x position
coef_guess_mat[:, 2] + position_range, # y position
coef_guess_mat[:, 3] + coef_guess_mat[:, 3] * movement_allowance, # sigma x
coef_guess_mat[:, 4] + coef_guess_mat[:, 4] * movement_allowance, # sigma y
coef_guess_mat[:, 5] + 2 * 3.14159, # theta
ub_background] # background
coef_guess_mat[:, 5] + 2 * 3.14159, # theta
ub_background] # background
lb_mat = np.transpose(lb_mat)
ub_mat = np.transpose(ub_mat)
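Per-parameter bounds assembled this way typically feed a bounded least-squares call; a self-contained sketch with a made-up single-gaussian residual (not the actual Gauss_Fit objective):

import numpy as np
from scipy.optimize import least_squares

def residuals(p, x, y):
    # p = (amplitude, center, sigma) for a made-up 1D gaussian model
    return y - p[0] * np.exp(-((x - p[1]) ** 2) / (2 * p[2] ** 2))

x = np.linspace(-5, 5, 101)
y = np.exp(-x ** 2 / 2)
sol = least_squares(residuals, x0=[0.5, 0.1, 2.0],
                    bounds=([0.0, -1.0, 0.1], [2.0, 1.0, 5.0]), args=(x, y))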
......@@ -451,7 +455,8 @@ class Gauss_Fit(object):
return atom_ind, coef_guess_mat, fit_region, s1, s2, lb_mat, ub_mat
def check_data(self, atom_grp):
@staticmethod
def check_data(atom_grp):
# some data checks here
try:
img = atom_grp['Cropped_Clean_Image']
......@@ -509,10 +514,12 @@ class Gauss_Fit(object):
ds_atom_fits = MicroDataset('Gaussian_Fits', data=self.fit_dataset)
ds_motif_guesses = MicroDataset('Motif_Guesses', data=self.motif_guess_dataset)
ds_motif_fits = MicroDataset('Motif_Fits', data=self.motif_converged_dataset)
ds_nearest_neighbors = MicroDataset('Nearest_Neighbor_Indices', data=self.closest_neighbors_mat, dtype=np.uint32)
ds_nearest_neighbors = MicroDataset('Nearest_Neighbor_Indices',
data=self.closest_neighbors_mat, dtype=np.uint32)
dgrp_atom_finding = MicroDataGroup(self.atom_grp.name.split('/')[-1], parent=self.atom_grp.parent.name)
dgrp_atom_finding.attrs = self.fitting_parms
dgrp_atom_finding.addChildren([ds_atom_guesses, ds_atom_fits, ds_motif_guesses, ds_motif_fits, ds_nearest_neighbors])
dgrp_atom_finding.addChildren([ds_atom_guesses, ds_atom_fits, ds_motif_guesses,
ds_motif_fits, ds_nearest_neighbors])
hdf = ioHDF5(self.atom_grp.file)
h5_atom_refs = hdf.writeData(dgrp_atom_finding)
......@@ -520,19 +527,18 @@ class Gauss_Fit(object):
return self.atom_grp
def fit_motif(self, plot_results=True):
'''
"""
Parameters
----------
plot_results: boolean (default = True)
Flag to specify whether a result summary should be plotted
plot_results: boolean (default = True)
Flag to specify whether a result summary should be plotted
Returns
-------
motif_converged_dataset: NxM numpy array of tuples where N is the number of motifs and M is the number
of nearest neighbors considered. Each tuple contains the converged parameters for a gaussian fit to
an atom in a motif window.
'''
motif_converged_dataset: NxM numpy array of tuples where N is the number of motifs and M is the number
of nearest neighbors considered. Each tuple contains the converged parameters for a gaussian fit to
an atom in a motif window.
"""
self.motif_guesses = []
self.motif_parms = []
......@@ -602,46 +608,3 @@ class Gauss_Fit(object):
fig.show()
return self.motif_converged_dataset
# if __name__=='__main__':
# file_name = r"C:\Users\o2d\Documents\pycroscopy\\test_scripts\\testing_gauss_fit\image 04.h5"
# folder_path, file_path = os.path.split(file_name)
#
# file_base_name, file_extension = file_name.rsplit('.')
# h5_file = h5py.File(file_name, mode='r+')
# # look at the data tree in the h5
# '''
# # define a small function called 'print_tree' to look at the folder tree structure
# def print_tree(parent):
# print(parent.name)
# if isinstance(parent, h5py.Group):
# for child in parent:
# print_tree(parent[child])
# '''
#
# #print('Datasets and datagroups within the file:')
# file_handle = h5_file
# #print_tree(file_handle)
#
# cropped_clean_image = h5_file['/Measurement_000/Channel_000/Raw_Data-Windowing_000/Image_Windows-SVD_000/U-Cluster_000/Labels-Atom_Finding_000/Cropped_Clean_Image']
# atom_grp = cropped_clean_image.parent
# guess_params = atom_grp['Guess_Positions']
#
# num_nearest_neighbors = 4
# psf_width = atom_grp.attrs['psf_width']
# win_size = atom_grp.attrs['motif_win_size']
#
# fitting_parms = {'fit_region_size': win_size * 0.5, # region to consider when fitting
# 'num_nearest_neighbors': num_nearest_neighbors,
# 'sigma_guess': 3, # starting guess for gaussian standard deviation
# 'position_range': win_size / 4,# range that the fitted position can go from initial guess position[pixels]
# 'max_function_evals': 100,
# 'fitting_tolerance': 1E-4,
# 'symmetric': True,
# 'background': True,
# 'movement_allowance': 5.0} # percent of movement allowed (on some parameters)
#
# foo = Gauss_Fit(atom_grp, fitting_parms)
#
#
......@@ -57,25 +57,25 @@ def calculate_loop_centroid(vdc, loop_vals):
vdc = np.squeeze(np.array(vdc))
num_steps = vdc.size
x_vals = np.zeros(num_steps - 1)
y_vals = np.zeros(num_steps - 1)
area_vals = np.zeros(num_steps - 1)
for index in range(num_steps - 1):
x_i = vdc[index]
x_i1 = vdc[index + 1]
y_i = loop_vals[index]
y_i1 = loop_vals[index + 1]
x_vals[index] = (x_i + x_i1)*(x_i*y_i1 - x_i1*y_i)
y_vals[index] = (y_i + y_i1)*(x_i*y_i1 - x_i1*y_i)
area_vals[index] = (x_i*y_i1 - x_i1*y_i)
x_vals[index] = (x_i + x_i1) * (x_i * y_i1 - x_i1 * y_i)
y_vals[index] = (y_i + y_i1) * (x_i * y_i1 - x_i1 * y_i)
area_vals[index] = (x_i * y_i1 - x_i1 * y_i)
area = 0.50 * np.sum(area_vals)
cent_x = (1.0/(6.0*area)) * np.sum(x_vals)
cent_y = (1.0/(6.0*area)) * np.sum(y_vals)
cent_x = (1.0 / (6.0 * area)) * np.sum(x_vals)
cent_y = (1.0 / (6.0 * area)) * np.sum(y_vals)
return (cent_x, cent_y), area
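The loop above is the standard shoelace polygon formula: area = ½ Σ (x_i·y_{i+1} − x_{i+1}·y_i), with the centroid components scaled by 1/(6·area). It assumes a closed loop (first point repeated at the end). A quick sanity check on a closed unit square, which should give area 1.0 and centroid (0.5, 0.5):

import numpy as np

vdc = np.array([0., 1., 1., 0., 0.])         # x coordinates of the closed loop
loop_vals = np.array([0., 0., 1., 1., 0.])   # y coordinates of the closed loop
(cent_x, cent_y), area = calculate_loop_centroid(vdc, loop_vals)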
......@@ -185,7 +185,7 @@ def projectLoop(vdc, amp_vec, phase_vec):
pdist_vals = np.zeros(shape=len(xdat_fit))
for point in range(len(xdat_fit)):
pdist_vals[point] = np.linalg.norm(np.array([0, 0]) -
np.array([xdat_fit[point], ydat_fit[point]]))
np.array([xdat_fit[point], ydat_fit[point]]))
min_point_ind = np.where(pdist_vals == pdist_vals.min())[0][0]
min_point = np.array([xdat_fit[min_point_ind], ydat_fit[min_point_ind]])
......@@ -238,7 +238,8 @@ def projectLoop(vdc, amp_vec, phase_vec):
'Centroid': centroid, 'Geometric Area': geo_area} # Dictionary of Results from projecting
return results
###############################################################################
......@@ -262,19 +263,19 @@ def loop_fit_function(vdc, coef_vec):
a = coef_vec[:5]
b = coef_vec[5:]
d = 1000
v1 = np.asarray(vdc[:int(len(vdc) / 2)])
v2 = np.asarray(vdc[int(len(vdc) / 2):])
g1 = (b[1]-b[0])/2*(erf((v1-a[2])*d)+1)+b[0]
g2 = (b[3]-b[2])/2*(erf((v2-a[3])*d)+1)+b[2]
y1 = (g1 * erf((v1-a[2])/g1) + b[0])/(b[0]+b[1])
y2 = (g2 * erf((v2-a[3])/g2) + b[2])/(b[2]+b[3])
g1 = (b[1] - b[0]) / 2 * (erf((v1 - a[2]) * d) + 1) + b[0]
g2 = (b[3] - b[2]) / 2 * (erf((v2 - a[3]) * d) + 1) + b[2]
y1 = (g1 * erf((v1 - a[2]) / g1) + b[0]) / (b[0] + b[1])
y2 = (g2 * erf((v2 - a[3]) / g2) + b[2]) / (b[2] + b[3])
f1 = a[0] + a[1] * y1 + a[4] * v1
f2 = a[0] + a[1] * y2 + a[4] * v2
f1 = a[0] + a[1]*y1 + a[4]*v1
f2 = a[0] + a[1]*y2 + a[4]*v2
loop_eval = np.hstack((f1, f2))
return loop_eval
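An illustrative evaluation of loop_fit_function on a bipolar DC sweep; the nine coefficients (a_0…a_4, b_0…b_3) below are made-up values rather than fitted results, and erf comes from scipy.special as elsewhere in this module:

import numpy as np
from scipy.special import erf  # needed when running this snippet standalone

vdc = np.concatenate((np.linspace(-10, 10, 64), np.linspace(10, -10, 64)))
coef_vec = np.array([0.0, 1.0, -2.0, 2.0, 0.01, 0.1, 0.2, 0.1, 0.2])
loop = loop_fit_function(vdc, coef_vec)  # first half: forward branch, second: reverse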
......@@ -313,7 +314,7 @@ def loop_fit_jacobian(vdc, coef_vec):
g2 = (b[3] - b[2]) / 2 * (erf((V2 - a[3]) * d) + 1) + b[2]
# Some useful fractions
oosqpi = 1.0/np.sqrt(np.pi)
oosqpi = 1.0 / np.sqrt(np.pi)
oob01 = 1.0 / (b[0] + b[1])
oob23 = 1.0 / (b[2] + b[3])
......@@ -325,16 +326,16 @@ def loop_fit_jacobian(vdc, coef_vec):
dg2b2 = -0.5 * (erf(V2 - a[3]) * d + 1)
dg2b3 = -dg2b2
Y1 = oob01 * (g1 * erf((V1-a[2])/g1) + b[0])
Y2 = oob23 * (g2 * erf((V2-a[3])/g2) + b[2])
Y1 = oob01 * (g1 * erf((V1 - a[2]) / g1) + b[0])
Y2 = oob23 * (g2 * erf((V2 - a[3]) / g2) + b[2])
# Derivatives of Y1 and Y2