Project: pycroscopy (Vasudevan, Rama K)
Commit 66229074, authored Aug 22, 2017 by Unknown
Commit message: PEP8 cleanup
Parent: 54965ddd
Changes: 49 files
pycroscopy/__version__.py

version = '0.0.53'
date = '8/14/2017'
time = '9:41:32'
(the old revision ended without a trailing newline; this commit adds one)
pycroscopy/analysis/be_loop_model.py

@@ -59,6 +59,7 @@ field_names = ['a_0', 'a_1', 'a_2', 'a_3', 'a_4', 'b_0', 'b_1', 'b_2', 'b_3', 'R
loop_fit32 = np.dtype({'names': field_names,
                       'formats': [np.float32 for name in field_names]})


class BELoopModel(Model):
    """
    Analysis of Band excitation loops using functional fits
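As context for the loop_fit32 definition above, here is a minimal sketch of how such a compound NumPy dtype is built and used to hold per-pixel fit results. The field names and values below are illustrative only and are not taken from the commit:

import numpy as np

field_names = ['a_0', 'a_1', 'R2 Criterion']            # illustrative subset of field names
loop_fit32 = np.dtype({'names': field_names,
                       'formats': [np.float32 for _ in field_names]})

results = np.zeros(4, dtype=loop_fit32)                 # one record per pixel
results['a_0'] = [0.1, 0.2, 0.3, 0.4]                   # assign an entire field at once
print(results[0])                                        # -> (0.1, 0.0, 0.0)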
@@ -425,7 +426,7 @@ class BELoopModel(Model):
        h5_loop_parm : h5py.Dataset
            Dataset of physical parameters
        """
        dset_name = h5_loop_fit.name + '_Loop_Parameters'
        h5_loop_parameters = create_empty_dataset(h5_loop_fit, dtype=switching32, dset_name=dset_name,
                                                  new_attrs={'nuc_threshold': nuc_threshold})

@@ -894,7 +895,7 @@ class BELoopModel(Model):
                                                self.sho_spec_inds_per_forc * (self._current_forc + 1))
            self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))
            self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]
        elif self._current_forc < self._num_forcs - 1:
            # Resest for next FORC
            self._current_forc += 1

@@ -927,7 +928,7 @@ class BELoopModel(Model):
                                                self.sho_spec_inds_per_forc * (self._current_forc + 1))
            self._end_pos = int(min(self.h5_projected_loops.shape[0], self._start_pos + self.max_pos))
            self.data = self.h5_projected_loops[self._start_pos:self._end_pos, self._current_sho_spec_slice]
        elif self._current_forc < self._num_forcs - 1:
            # Resest for next FORC
            self._current_forc += 1

@@ -945,7 +946,7 @@ class BELoopModel(Model):
            self.data = None
        guess = self.h5_guess[self._start_pos:self._end_pos,
                              self._current_met_spec_slice].reshape([-1, 1])
        self.guess = compound_to_scalar(guess)[:, :-1]

    def _create_guess_datasets(self):

@@ -1052,7 +1053,7 @@ class BELoopModel(Model):
            pix_inds = np.where(labels == clust_id)[0]
            temp = np.atleast_2d(loop_fit_results[clust_id][0].x)
            # convert to the appropriate dtype as well:
            r2 = 1 - np.sum(np.abs(loop_fit_results[clust_id][0].fun ** 2))
            guess_parms[pix_inds] = realToCompound(np.hstack([temp, np.atleast_2d(r2)]), loop_fit32)

        return guess_parms

@@ -1126,6 +1127,7 @@ class LoopOptimize(Optimize):
    """
    Subclass of Optimize with BE Loop specific changes
    """

    def _initiateSolverAndObjFunc(self):
        if self.solver_type in scipy.optimize.__dict__.keys():
            solver = scipy.optimize.__dict__[self.solver_type]
pycroscopy/analysis/be_sho_model.py

@@ -281,7 +281,8 @@ class BESHOmodel(Model):
            Default None, output of psutil.cpu_count - 2 is used
        strategy: string
            Default is 'Wavelet_Peaks'.
            Can be one of ['wavelet_peaks', 'relative_maximum', 'gaussian_processes']. For updated list, run
            GuessMethods.methods
        options: dict
            Default Options for wavelet_peaks{"peaks_widths": np.array([10,200]), "peak_step":20}.
            Dictionary of options passed to strategy. For more info see GuessMethods documentation.

@@ -428,7 +429,8 @@ class BESHOmodel(Model):
            sho_vec['Frequency [Hz]'] = self.freq_vec[peak_inds]  # Frequency
            sho_vec['Quality Factor'] = np.ones_like(comp_vals) * 10  # Quality factor
            # Add something here for the R^2
            sho_vec['R2 Criterion'] = np.array([self.r_square(self.data, self._sho_func, self.freq_vec, sho_parms)
                                                for sho_parms in sho_vec])
        elif strategy in ['complex_gaussian']:
            for iresult, result in enumerate(results):
                sho_vec['Amplitude [V]'][iresult] = result[0]
pycroscopy/analysis/guess_methods.py

@@ -21,6 +21,7 @@ class GuessMethods(object):
    input and return the guess parameters. The guess methods here use the keyword arguments to configure the returned
    function.
    """

    def __init__(self):
        self.methods = ['wavelet_peaks', 'relative_maximum', 'gaussian_processes', 'complex_gaussian']

@@ -72,7 +73,7 @@ class GuessMethods(object):
            warn('Error: Please specify "peak_widths" kwarg to use this method')

    @staticmethod
    def absolute_maximum(*args, **kwargs):
        """
        Finds maximum in 1d-array

        Parameters

@@ -84,9 +85,11 @@ class GuessMethods(object):
        -------
        fastpeak: callable function
        """
        def fastpeak(vector):
            vec_max = np.argmax(vector)
            return vec_max

        return fastpeak

    @staticmethod

@@ -184,4 +187,4 @@ def r_square(data_vec, func, *args, **kwargs):
    r_squared = 1 - ss_res / ss_tot if ss_tot > 0 else 0

    return r_squared
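The r_square helper above returns the usual coefficient of determination, falling back to 0 when the total sum of squares is zero. A minimal, self-contained sketch of the same computation; the function body, sample data, and line model below are illustrative assumptions rather than the commit's exact code:

import numpy as np

def r_square(data_vec, func, *args, **kwargs):
    # residual and total sums of squares for a fit evaluated by func
    fit_vec = func(*args, **kwargs)
    ss_res = np.sum((np.ravel(data_vec) - np.ravel(fit_vec)) ** 2)
    ss_tot = np.sum((np.ravel(data_vec) - np.mean(data_vec)) ** 2)
    return 1 - ss_res / ss_tot if ss_tot > 0 else 0

# example: goodness of fit of an assumed straight-line model to noisy data
x = np.linspace(0, 1, 50)
y = 2 * x + 0.01 * np.random.randn(50)
print(r_square(y, lambda: 2 * x))        # close to 1 for a good fit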
pycroscopy/analysis/model.py

@@ -39,8 +39,8 @@ class Model(object):
    """

    def __init__(self, h5_main, variables=['Frequency'], parallel=True):
        """
        For now, we assume that the guess dataset has not been generated for this dataset but we will relax this
        requirement after testing the basic components.
        """
        # Checking if dataset is "Main"
pycroscopy/analysis/optimize.py

@@ -77,7 +77,8 @@ class Optimize(object):
            Number of logical cores to use for computing
        strategy: string
            Default is 'Wavelet_Peaks'.
            Can be one of ['wavelet_peaks', 'relative_maximum', 'gaussian_processes']. For updated list,
            run GuessMethods.methods
        options: dict
            Default: Options for wavelet_peaks{"peaks_widths": np.array([10,200]), "peak_step":20}.
            Dictionary of options passed to strategy. For more info see GuessMethods documentation.
pycroscopy/analysis/tests/test_123.py

from unittest import TestCase


class Test(TestCase):
    def test_this_module(self):
        pass
pycroscopy/analysis/utils/atom_finding.py

@@ -381,7 +381,9 @@ def visualize_atom_fit(atom_rough_pos, all_atom_guesses, parm_dict, fitting_parm
    atom_ind = np.argsort(temp_dist)[0]
    parm_dict['verbose'] = True
    coef_guess_mat, lb_mat, ub_mat, coef_fit_mat, fit_region, s_mat, plsq = fit_atom_pos((atom_ind,
                                                                                          parm_dict,
                                                                                          fitting_parms))
    print('\tAmplitude\tx position\ty position\tsigma')
    print('-------------------GUESS---------------------')
pycroscopy/analysis/utils/atom_finding_general_gaussian.py

@@ -24,6 +24,7 @@ from ...io.io_hdf5 import ioHDF5
from ...viz import plot_utils
from ..model import Model


def do_fit(single_parm):
    parms = single_parm[0]
    coef_guess_mat = parms[1]

@@ -148,7 +149,8 @@ class Gauss_Fit(object):
            Parameters used for atom position fitting
            'fit_region_size': region to consider when fitting. Should be large enough to see the nearest neighbors.
            'num_nearest_neighbors': the number of nearest neighbors to fit
            'sigma_guess': starting guess for gaussian standard deviation. Should be about the size of an atom width in
                pixels.
            'position_range': range that the fitted position can move from initial guess position in pixels
            'max_function_evals': maximum allowed function calls; passed to the least squares fitter
            'fitting_tolerance': target difference between the fit and the data

@@ -262,12 +264,14 @@ class Gauss_Fit(object):
        parm_list = itt.izip(self.guess_parms, itt.repeat(self.fitting_parms))
        self.fitting_results = [do_fit(parm) for parm in parm_list]

        print('Finalizing datasets...')
        self.guess_dataset = np.zeros(shape=(self.num_atoms, self.num_nearest_neighbors + 1),
                                      dtype=self.atom_coeff_dtype)
        self.fit_dataset = np.zeros(shape=self.guess_dataset.shape, dtype=self.guess_dataset.dtype)

        for atom_ind, single_atom_results in enumerate(self.fitting_results):
            types = np.hstack((self.h5_guess['type'][atom_ind],
                               [self.h5_guess['type'][neighbor] for neighbor in self.closest_neighbors_mat[atom_ind]]))
            atom_data = np.hstack((np.vstack(types), single_atom_results))
            atom_data = [tuple(element) for element in atom_data]
            self.fit_dataset[atom_ind] = atom_data

@@ -510,10 +514,12 @@ class Gauss_Fit(object):
        ds_atom_fits = MicroDataset('Gaussian_Fits', data=self.fit_dataset)
        ds_motif_guesses = MicroDataset('Motif_Guesses', data=self.motif_guess_dataset)
        ds_motif_fits = MicroDataset('Motif_Fits', data=self.motif_converged_dataset)
        ds_nearest_neighbors = MicroDataset('Nearest_Neighbor_Indices', data=self.closest_neighbors_mat,
                                            dtype=np.uint32)
        dgrp_atom_finding = MicroDataGroup(self.atom_grp.name.split('/')[-1], parent=self.atom_grp.parent.name)
        dgrp_atom_finding.attrs = self.fitting_parms
        dgrp_atom_finding.addChildren([ds_atom_guesses, ds_atom_fits, ds_motif_guesses, ds_motif_fits,
                                       ds_nearest_neighbors])

        hdf = ioHDF5(self.atom_grp.file)
        h5_atom_refs = hdf.writeData(dgrp_atom_finding)

@@ -521,19 +527,18 @@ class Gauss_Fit(object):
        return self.atom_grp

    def fit_motif(self, plot_results=True):
        """
        Parameters
        ----------
        plot_results: boolean (default = True)
            Flag to specify whether a result summary should be plotted

        Returns
        -------
        motif_converged_dataset: NxM numpy array of tuples where N is the number of motifs and M is the number
            of nearest neighbors considered. Each tuple contains the converged parameters for a gaussian fit to
            an atom in a motif window.
        """
        self.motif_guesses = []
        self.motif_parms = []
pycroscopy/analysis/utils/be_loop.py

@@ -506,7 +506,10 @@ def calc_switching_coef_vec(loop_coef_vec, nuc_threshold):
    switching_coef_vec['R+'] = loop_coef_vec[:, 0] + loop_coef_vec[:, 1]
    switching_coef_vec['R-'] = loop_coef_vec[:, 0]
    switching_coef_vec['Switchable Polarization'] = loop_coef_vec[:, 1]
    switching_coef_vec['Work of Switching'] = np.abs(loop_coef_vec[:, 3] - loop_coef_vec[:, 2]) * \
        np.abs(loop_coef_vec[:, 1])
    switching_coef_vec['Nucleation Bias 1'] = nuc_v01
    switching_coef_vec['Nucleation Bias 2'] = nuc_v02
pycroscopy/analysis/utils/be_sho.py

@@ -63,7 +63,10 @@ def SHOestimateGuess(w_vec, resp_vec, num_points=5):
        a = ((w1 ** 2 - w2 ** 2) * (w1 * X2 * (X1 ** 2 + Y1 ** 2) -
                                    w2 * X1 * (X2 ** 2 + Y2 ** 2))) / denom
        b = ((w1 ** 2 - w2 ** 2) * (w1 * Y2 * (X1 ** 2 + Y1 ** 2) -
                                    w2 * Y1 * (X2 ** 2 + Y2 ** 2))) / denom
        c = ((w1 ** 2 - w2 ** 2) * (X2 * Y1 - X1 * Y2)) / denom
        d = (w1 ** 3 * (X1 ** 2 + Y1 ** 2) - w1 ** 2 * w2 * (X1 * X2 + Y1 * Y2) -
             w1 * w2 ** 2 * (X1 * X2 + Y1 * Y2) + w2 ** 3 * (X2 ** 2 + Y2 ** 2)) / denom

        if d > 0:
            a_mat = append(a_mat, [a, b, c, d])

@@ -75,7 +78,9 @@ def SHOestimateGuess(w_vec, resp_vec, num_points=5):
        H_fit = A_fit * w0_fit ** 2 * exp(1j * phi_fit) / (w_vec ** 2 - 1j * w_vec * w0_fit / Q_fit - w0_fit ** 2)
        e_vec = append(e_vec,
                       sum((real(H_fit) - real(resp_vec)) ** 2) +
                       sum((imag(H_fit) - imag(resp_vec)) ** 2))

    if a_mat.size > 0:
        a_mat = a_mat.reshape(-1, 4)
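The H_fit expression in the hunk above is the complex response of a simple harmonic oscillator (SHO) evaluated at the frequencies in w_vec. The following is a small standalone sketch of that model function; the function name, frequency axis, and parameter values are illustrative assumptions, not values from the commit:

import numpy as np

def sho_response(w_vec, amp, w0, q_factor, phase):
    # complex SHO amplitude: A * w0^2 * exp(i*phi) / (w^2 - i*w*w0/Q - w0^2)
    return amp * w0 ** 2 * np.exp(1j * phase) / (w_vec ** 2 - 1j * w_vec * w0 / q_factor - w0 ** 2)

w_vec = np.linspace(200e3, 400e3, 128)                     # illustrative frequency axis in Hz
resp = sho_response(w_vec, amp=1e-3, w0=300e3, q_factor=150, phase=0.5)
print(np.abs(resp).max())                                   # the amplitude peaks near w0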
pycroscopy/analysis/utils/tree.py

@@ -8,12 +8,14 @@ Created on Wed Aug 31 17:03:29 2016
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np

# TODO: Test and debug node and clusterTree classes for agglomerative clustering etc


class Node(object):
    """
    Basic unit of a tree - a node. Keeps track of its value, labels, parent, children, level in the tree etc.
    """

    def __init__(self, name, value=None, parent=None, dist=0, labels=None, children=[], compute_mean=False,
                 verbose=False):
        """

@@ -94,6 +96,7 @@ class ClusterTree(object):
    """
    Creates a tree representation from the provided linkage pairing. Useful for clustering
    """

    def __init__(self, linkage_pairing, labels, distances=None, centroids=None):
        """
        Parameters
pycroscopy/io/__version__.py

major = 0
minor = 0
micro = 1
pycroscopy/io/be_hdf_utils.py

@@ -14,7 +14,7 @@ __all__ = [
    'maxReadPixels', 'getActiveUDVSsteps', 'getDataIndicesForUDVSstep', 'getForExcitWfm',
    'getIndicesforPlotGroup', 'getSliceForExcWfm', 'generateTestSpectroscopicData', 'getSpecSliceForUDVSstep',
    'isSimpleDataset', 'reshapeToNsteps', 'reshapeToOneStep'
    ]


def maxReadPixels(max_memory, tot_pix, bins_per_step, bytes_per_bin=4):

@@ -44,10 +44,11 @@ def maxReadPixels(max_memory, tot_pix, bins_per_step, bytes_per_bin=4):
    # alternatively try .nbytes
    bytes_per_step = bins_per_step * bytes_per_bin
    max_pix = np.rint(max_memory / bytes_per_step)
    # print('Allowed to read {} of {} pixels'.format(max_pix,tot_pix))
    max_pix = max(1, min(tot_pix, max_pix))
    return np.uint(max_pix)


def getActiveUDVSsteps(h5_raw):
    """
    Returns all the active UDVS steps in the data

@@ -62,9 +63,10 @@ def getActiveUDVSsteps(h5_raw):
    steps : 1D numpy array
        Active UDVS steps
    """
    udvs_step_vec = getAuxData(h5_raw, auxDataName=['UDVS_Indices'])[0].value
    return np.unique(udvs_step_vec)


def getSliceForExcWfm(h5_bin_wfm, excit_wfm):
    """
    Returns the indices that correspond to the given excitation waveform

@@ -84,8 +86,9 @@ def getSliceForExcWfm(h5_bin_wfm, excit_wfm):
        Slice with the start and end indices
    """
    temp = np.where(h5_bin_wfm.value == excit_wfm)[0]
    # Need to add one additional index otherwise, the last index will be lost
    return slice(temp[0], temp[-1] + 1)


def getDataIndicesForUDVSstep(h5_udvs_inds, udvs_step_index):
    """
    Returns the spectroscopic indices that correspond to the given udvs_step_index

@@ -106,7 +109,8 @@ def getDataIndicesForUDVSstep(h5_udvs_inds, udvs_step_index):
    """
    spec_ind_udvs_step_col = h5_udvs_inds[h5_udvs_inds.attrs.get('UDVS_Step')]
    return np.where(spec_ind_udvs_step_col == udvs_step_index)[0]


def getSpecSliceForUDVSstep(h5_udvs_inds, udvs_step_index):
    """
    Returns the spectroscopic indices that correspond to the given udvs_step_index

@@ -126,8 +130,8 @@ def getSpecSliceForUDVSstep(h5_udvs_inds, udvs_step_index):
        Object containing the start and end indices
    """
    temp = np.where(h5_udvs_inds.value == udvs_step_index)[0]
    # Need to add one additional index otherwise, the last index will be lost
    return slice(temp[0], temp[-1] + 1)


def getForExcitWfm(h5_main, h5_other, wave_type):
    """

@@ -147,12 +151,12 @@ def getForExcitWfm(h5_main, h5_other, wave_type):
    ---------
    freq_vec : 1D numpy array
        data specific to specified excitation waveform
    """
    h5_bin_wfm_type = getAuxData(h5_main, auxDataName=['Bin_Wfm_Type'])[0]
    inds = np.where(h5_bin_wfm_type.value == wave_type)[0]
    return h5_other[slice(inds[0], inds[-1] + 1)]


def getIndicesforPlotGroup(h5_udvs_inds, ds_udvs, plt_grp_name):
    """
    For a provided plot group name in the udvs table, this function

@@ -180,32 +184,33 @@ def getIndicesforPlotGroup(h5_udvs_inds, ds_udvs, plt_grp_name):
    udvs_plt_grp_col : 1D numpy array
        data contained within the udvs table for the requested plot group
    """
    # working on the UDVS table first:
    # getting the numpy array corresponding the requested plot group
    udvs_col_data = np.squeeze(ds_udvs[ds_udvs.attrs.get(plt_grp_name)])
    # All UDVS steps that are NOT part of the plot grop are empty cells in the table
    # and hence assume a nan value.
    # getting the udvs step indices that belong to this plot group:
    step_inds = np.where(np.isnan(udvs_col_data) == False)[0]
    # Getting the values in that plot group that were non NAN
    udvs_plt_grp_col = udvs_col_data[step_inds]
    # ---------------------------------
    # Now we use the udvs step indices calculated above to get
    # the indices in the spectroscopic indices table
    spec_ind_udvs_step_col = h5_udvs_inds[h5_udvs_inds.attrs.get('UDVS_Step')]
    num_bins = len(np.where(spec_ind_udvs_step_col == step_inds[0])[0])
    # Stepehen says that we can assume that the number of bins will NOT change in a plot group
    step_bin_indices = np.zeros(shape=(len(step_inds), num_bins), dtype=int)

    for indx, step in enumerate(step_inds):
        step_bin_indices[indx, :] = np.where(spec_ind_udvs_step_col == step)[0]

    oneD_indices = step_bin_indices.reshape((step_bin_indices.shape[0] * step_bin_indices.shape[1]))
    return (step_bin_indices, oneD_indices, udvs_plt_grp_col)


def reshapeToOneStep(raw_mat, num_steps):
    """
    Reshapes provided data from (pos, step * bin) to (pos * step, bin).

@@ -224,12 +229,13 @@ def reshapeToOneStep(raw_mat, num_steps):
        Data rearranged as (positions * step, bin)
    """
    num_pos = raw_mat.shape[0]
    num_bins = int(raw_mat.shape[1] / num_steps)
    oneD = raw_mat
    oneD = oneD.reshape((num_bins * num_steps * num_pos))
    twoD = oneD.reshape((num_steps * num_pos, num_bins))
    return twoD


def reshapeToNsteps(raw_mat, num_steps):
    """
    Reshapes provided data from (positions * step, bin) to (positions, step * bin).
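The two reshape helpers above and below convert between the (positions, steps * bins) and (positions * steps, bins) layouts. A minimal NumPy illustration of the forward conversion performed by reshapeToOneStep; the array sizes and values are made up for the example:

import numpy as np

num_pos, num_steps, num_bins = 2, 3, 4
raw_mat = np.arange(num_pos * num_steps * num_bins).reshape(num_pos, num_steps * num_bins)

# (pos, step * bin) -> (pos * step, bin), equivalent to reshapeToOneStep(raw_mat, num_steps)
one_step = raw_mat.reshape(num_steps * num_pos, num_bins)
print(raw_mat.shape, '->', one_step.shape)     # (2, 12) -> (6, 4)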
@@ -248,12 +254,13 @@ def reshapeToNsteps(raw_mat, num_steps):
        Data rearranged as (positions, step * bin)
    """
    num_bins = raw_mat.shape[1]
    num_pos = int(raw_mat.shape[0] / num_steps)
    oneD = raw_mat
    oneD = oneD.reshape(num_bins * num_steps * num_pos)
    twoD = oneD.reshape((num_pos, num_steps * num_bins))
    return twoD


def generateTestSpectroscopicData(num_bins=7, num_steps=3, num_pos=4):
    """
    Generates a (preferably small) test data set using the given parameters.

@@ -276,10 +283,10 @@ def generateTestSpectroscopicData(num_bins=7, num_steps=3, num_pos=4):
    """
    full_data = np.zeros((num_steps * num_bins, num_pos))
    for pos in range(num_pos):
        bin_count = 0
        for step in range(num_steps):
            for bind in range(num_bins):
                full_data[bin_count, pos] = (pos + 1) * 100 + (step + 1) * 10 + (bind + 1)
                bin_count += 1
    return full_data
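The nested loop above encodes the position, step, and bin indices as the hundreds, tens, and units digits of each value, which makes reshaping bugs easy to spot by eye. A quick illustration; the printed values follow directly from the formula shown in the hunk:

full_data = generateTestSpectroscopicData(num_bins=2, num_steps=2, num_pos=1)
print(full_data[:, 0])     # [111. 112. 121. 122.] -> position 1, steps 1-2, bins 1-2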
@@ -304,15 +311,16 @@ def isSimpleDataset(h5_main, isBEPS=True):
    data_type : Boolean
        Whether or not this dataset can be unraveled / flattened
    """
    if isBEPS:
        beps_modes = ['DC modulation mode', 'AC modulation mode with time reversal', 'current mode', 'Relaxation']
        if h5_main.parent.parent.attrs['VS_mode'] in beps_modes:
            # I am pretty sure that AC modulation also is simple
            return True
        else:
            # Could be user defined or some other kind I am not aware of
            # In many cases, some of these datasets could also potentially be simple datasets
            ds_udvs = getAuxData(h5_main, auxDataName=['UDVS'])[0]
            excit_wfms = ds_udvs[ds_udvs.attrs.get('wave_mod')]
            wfm_types = np.unique(excit_wfms)
            if len(wfm_types) == 1:

@@ -336,7 +344,7 @@ def isSimpleDataset(h5_main, isBEPS=True):
                    # BEPS with multiple excitation waveforms but each excitation waveform has same number of bins
                    print('All BEPS excitation waves have same number of bins')
                    return True
            return False
    else:
        # BE-Line
        return True

@@ -367,4 +375,4 @@ def isReshapable(h5_main, step_start_inds=None):
    step_start_inds = np.hstack((step_start_inds, h5_main.shape[1]))
    num_bins = np.diff(step_start_inds)
    step_types = np.unique(num_bins)
    return len(step_types) == 1
pycroscopy/io/hdf_utils.py

@@ -34,6 +34,7 @@ def print_tree(parent):
    None
    """

    def __print(name, obj):
        print(name)

@@ -577,6 +578,7 @@ def get_formatted_labels(h5_dset):
        warn('labels attribute was missing')
        return None


def reshape_to_Ndims(h5_main, h5_pos=None, h5_spec=None, get_labels=False):
    """
    Reshape the input 2D matrix to be N-dimensions based on the

@@ -740,6 +742,7 @@ def reshape_to_Ndims(h5_main, h5_pos=None, h5_spec=None, get_labels=False):
    return results


def reshape_from_Ndims(ds_Nd, h5_pos=None, h5_spec=None):
    """
    Reshape the input 2D matrix to be N-dimensions based on the

@@ -838,6 +841,7 @@ def reshape_from_Ndims(ds_Nd, h5_pos=None, h5_spec=None):
    return ds_2d, True


def get_dimensionality(ds_index, index_sort=None):
    """
    Get the size of each index dimension in a specified sort order

@@ -1096,7 +1100,6 @@ def copyRegionRefs(h5_source, h5_target):
    h5_spec_inds = h5_target.file[h5_target.attrs['Spectroscopic_Indices']]
    h5_spec_vals = h5_target.file[h5_target.attrs['Spectroscopic_Values']]

    for key in h5_source.attrs.keys():
        if '_Plot_Group' not in key:
            continue

@@ -1260,8 +1263,8 @@ def buildReducedSpec(h5_spec_inds, h5_spec_vals, keep_dim, step_starts, basename
    '''
    Create new MicroDatasets to hold the data
    Name them based on basename
    '''
    ds_inds = MicroDataset(basename + '_Indices', ind_mat, dtype=h5_spec_inds.dtype)
    ds_vals = MicroDataset(basename + '_Values', val_mat, dtype=h5_spec_vals.dtype)
    # Extracting the labels from the original spectroscopic data sets
    sho_inds_labs = h5_spec_inds.attrs['labels'][keep_dim]
    # Creating the dimension slices for the new spectroscopic data sets

@@ -1337,7 +1340,7 @@ def calc_chunks(dimensions, data_size, unit_chunks=None, max_chunk_mem=10240):
    '''
    Loop until chunk_size is greater than the maximum chunk_mem or the chunk_size is equal to
    that of dimensions
    '''
    while np.prod(unit_chunks) * data_size <= max_chunk_mem:
        '''
        Check if all chunk dimensions are greater or equal to the
        actual dimensions. Exit the loop if true.

@@ -1349,7 +1352,7 @@ def calc_chunks(dimensions, data_size, unit_chunks=None, max_chunk_mem=10240):
        Find the index of the next chunk to be increased and increment it by the base_chunk
        size
        '''
        ichunk = np.argmax(dimensions / unit_chunks)
        unit_chunks[ichunk] += base_chunks[ichunk]
        '''
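calc_chunks above keeps enlarging one chunk dimension at a time, always the dimension that is proportionally furthest from covering the full dataset, until the chunk would exceed the memory budget or span every dimension. A compact sketch of that growth idea under assumed inputs; the real function carries more bookkeeping (base chunk sizes, dtype handling) that is omitted here:

import numpy as np

def grow_chunks(dimensions, data_size, max_chunk_mem=10240):
    # start from a single-element chunk and grow it toward the dataset shape
    dimensions = np.asarray(dimensions, dtype=float)
    unit_chunks = np.ones_like(dimensions)
    while np.prod(unit_chunks) * data_size <= max_chunk_mem:
        if np.all(unit_chunks >= dimensions):
            break
        # grow the dimension that currently covers the smallest fraction of the data
        ichunk = np.argmax(dimensions / unit_chunks)
        unit_chunks[ichunk] += 1
    return np.minimum(unit_chunks, dimensions).astype(int)

print(grow_chunks([1024, 1024], data_size=4))    # -> [51 51], roughly 10 KB per chunk at 4 bytes/element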
@@ -1469,7 +1472,7 @@ def create_spec_inds_from_vals(ds_spec_val_mat):
    Find how quickly the spectroscopic values are changing in each row
    and the order of row from fastest changing to slowest.
    """
    change_count = [len(np.where([row[i] != row[i - 1] for i in range(len(row))])[0]) for row in ds_spec_val_mat]
    change_sort = np.argsort(change_count)[::-1]
    """

@@ -1479,7 +1482,7 @@ def create_spec_inds_from_vals(ds_spec_val_mat):
    indices = np.zeros(ds_spec_val_mat.shape[0])
    for jcol in range(1, ds_spec_val_mat.shape[1]):
        this_col = ds_spec_val_mat[change_sort, jcol]
        last_col = ds_spec_val_mat[change_sort, jcol - 1]
        """
        Check if current column values are different than those
pycroscopy/io/io_hdf5.py

@@ -25,7 +25,6 @@ if sys.version_info.major == 3: