Unverified commit 1b287a8a authored by CompPhysChris, committed by GitHub

Merge pull request #127 from pycroscopy/cades_dev

Cades dev
parents c6be6c5b b460d473
......@@ -152,15 +152,15 @@ h5_s = h5_svd_group['S']
abun_maps = np.reshape(h5_u[:,:25], (num_rows, num_cols, -1))
# Visualize the variance / statistical importance of each component:
px.plot_utils.plotScree(h5_s, title='Note the exponential drop of variance with number of components')
px.plot_utils.plot_scree(h5_s, title='Note the exponential drop of variance with number of components')
# Visualize the eigenvectors:
first_evecs = h5_v[:9, :]
px.plot_utils.plot_loops(freq_vec, np.abs(first_evecs), x_label=x_label, y_label=y_label, plots_on_side=3,
subtitles='Component', title='SVD Eigenvectors (Amplitude)', evenly_spaced=False)
subtitle_prefix='Component', title='SVD Eigenvectors (Amplitude)', evenly_spaced=False)
px.plot_utils.plot_loops(freq_vec, np.angle(first_evecs), x_label=x_label, y_label='Phase (rad)', plots_on_side=3,
subtitles='Component', title='SVD Eigenvectors (Phase)', evenly_spaced=False)
subtitle_prefix='Component', title='SVD Eigenvectors (Phase)', evenly_spaced=False)
# Visualize the abundance maps:
px.plot_utils.plot_map_stack(abun_maps, num_comps=9, heading='SVD Abundance Maps',
......
......@@ -1610,18 +1610,20 @@ def create_spec_inds_from_vals(ds_spec_val_mat):
return ds_spec_inds_mat
def get_unit_values(h5_spec_ind, h5_spec_val, dim_names=None):
def get_unit_values(h5_inds, h5_vals, is_spec=True, dim_names=None):
"""
Gets the unit arrays of values that describe the spectroscopic or position dimensions
Parameters
----------
h5_spec_ind : h5py.Dataset
Spectroscopic Indices dataset
h5_spec_val : h5py.Dataset
Spectroscopic Values dataset
h5_inds : h5py.Dataset
Spectroscopic or Position Indices dataset
h5_vals : h5py.Dataset
Spectroscopic or Position Values dataset
is_spec : bool, optional
Whether the provided datasets are spectroscopic (True) or position (False). Default = True
dim_names : str, or list of str, Optional
Names of the dimensions of interest
Names of the dimensions of interest. Default = all
Note - this function can be extended / modified for ancillary position dimensions as well
......@@ -1631,33 +1633,41 @@ def get_unit_values(h5_spec_ind, h5_spec_val, dim_names=None):
Dictionary containing the unit array for each dimension. The name of the dimensions are the keys.
"""
# First load to memory
inds_mat = h5_inds[()]
vals_mat = h5_vals[()]
if not is_spec:
# Convert to spectral shape
inds_mat = np.transpose(inds_mat)
vals_mat = np.transpose(vals_mat)
# For all dimensions, find where the index = 0
# basically, we are indexing all dimensions to 0
first_indices = []
for dim_ind in range(h5_spec_ind.shape[0]):
first_indices.append(h5_spec_ind[dim_ind] == 0)
for dim_ind in range(inds_mat.shape[0]):
first_indices.append(inds_mat[dim_ind] == 0)
first_indices = np.vstack(first_indices)
spec_dim_names = get_attr(h5_spec_ind, 'labels')
full_dim_names = get_attr(h5_inds, 'labels')
if dim_names is None:
dim_names = spec_dim_names
dim_names = full_dim_names
elif not isinstance(dim_names, list):
dim_names = [dim_names]
unit_values = dict()
for dim_name in dim_names:
# Find the row in the spectroscopic indices that corresponds to the dimensions we want to slice:
desired_row_ind = np.where(spec_dim_names == dim_name)[0][0]
desired_row_ind = np.where(full_dim_names == dim_name)[0][0]
# Find indices of all other dimensions
remaining_dims = list(range(h5_spec_ind.shape[0]))
remaining_dims = list(range(inds_mat.shape[0]))
remaining_dims.remove(desired_row_ind)
# The intersection of all these indices should give the desired index for the desired row
intersections = np.all(first_indices[remaining_dims, :], axis=0)
# apply this slicing to the values dataset:
unit_values[dim_name] = h5_spec_val[desired_row_ind, intersections]
unit_values[dim_name] = vals_mat[desired_row_ind, intersections]
return unit_values
......
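For orientation, a minimal usage sketch of the updated helper follows. The file name, group path, and dimension name are placeholders, and the import path is inferred from the `from .hdf_utils import ... get_unit_values ...` line later in this diff.

```python
import h5py
from pycroscopy.io.hdf_utils import get_unit_values  # module path inferred from the imports in this PR

# 'my_data.h5' and the group path below are placeholders for a real pycroscopy-formatted file
with h5py.File('my_data.h5', mode='r') as h5_f:
    h5_grp = h5_f['Measurement_000/Channel_000']

    # Spectroscopic dimensions - the default, unchanged behaviour:
    spec_unit_vals = get_unit_values(h5_grp['Spectroscopic_Indices'],
                                     h5_grp['Spectroscopic_Values'])

    # Position dimensions - enabled by the new is_spec flag; the matrices are
    # transposed internally so the same row-wise slicing logic applies:
    pos_unit_vals = get_unit_values(h5_grp['Position_Indices'],
                                    h5_grp['Position_Values'],
                                    is_spec=False, dim_names='X')
    print(pos_unit_vals['X'])  # 1D array of unique values along the hypothetical 'X' axis
```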
......@@ -13,6 +13,7 @@ import numpy as np
from .hdf_utils import checkIfMain, get_attr, get_data_descriptor, get_formatted_labels, \
get_dimensionality, get_sort_order, get_unit_values, reshape_to_Ndims
from .io_utils import transformToReal
from ..viz.jupyter_utils import simple_ndim_visualizer
class PycroDataset(h5py.Dataset):
......@@ -339,3 +340,26 @@ class PycroDataset(h5py.Dataset):
return transformToReal(data_slice), success
else:
return data_slice, success
def visualize(self, slice_dict=None, **kwargs):
"""
Interactive visualization of this dataset. Only available in Jupyter notebooks
Parameters
----------
slice_dict : dictionary, optional
Slicing instructions
"""
# TODO: Robust implementation that allows slicing
if len(self.pos_dim_labels + self.spec_dim_labels) > 4:
raise NotImplementedError('Unable to support visualization of more than 4 dimensions. Try slicing')
data_mat = self.get_n_dim_form()
pos_dim_names = self.pos_dim_labels[::-1]
spec_dim_names = self.spec_dim_labels
pos_dim_units_old = get_attr(self.h5_pos_inds, 'units')
spec_dim_units_old = get_attr(self.h5_spec_inds, 'units')
pos_ref_vals = get_unit_values(self.h5_pos_inds, self.h5_pos_vals, is_spec=False)
spec_ref_vals = get_unit_values(self.h5_spec_inds, self.h5_spec_vals, is_spec=True)
simple_ndim_visualizer(data_mat, pos_dim_names, pos_dim_units_old, spec_dim_names, spec_dim_units_old,
pos_ref_vals=pos_ref_vals, spec_ref_vals=spec_ref_vals, **kwargs)
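As a quick orientation, here is a hedged sketch of how the new visualize() method might be called from a Jupyter notebook. The file name and dataset path are placeholders, and it assumes PycroDataset is reachable from the top-level pycroscopy namespace.

```python
import h5py
import pycroscopy as px

# Placeholder file and dataset paths for an existing pycroscopy-formatted file
h5_f = h5py.File('my_data.h5', mode='r')
h5_main = h5_f['Measurement_000/Channel_000/Raw_Data']

# Wrap the main dataset and launch the interactive widget (Jupyter notebooks only).
# A NotImplementedError is raised if the dataset has more than 4 total dimensions.
pxd = px.PycroDataset(h5_main)
pxd.visualize()
```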
......@@ -22,7 +22,8 @@ from ...io_utils import getAvailableMem, recommendCores
from ...microdata import MicroDataset, MicroDataGroup
from ....analysis.optimize import Optimize
from ....processing.proc_utils import buildHistogram
from ....viz.plot_utils import plot_1d_spectrum, plot_2d_spectrogram, plot_histgrams
from ....viz.plot_utils import plot_histgrams
from ....viz.be_viz_utils import plot_1d_spectrum, plot_2d_spectrogram
nf32 = np.dtype({'names': ['super_band', 'inter_bin_band', 'sub_band'],
'formats': [np.float32, np.float32, np.float32]})
......@@ -492,8 +493,10 @@ def generatePlotGroups(h5_main, hdf, mean_resp, folder_path, basename, max_resp=
path_1d = path.join(folder_path, basename + '_Step_Avg_' + fig_title + '.png')
path_2d = path.join(folder_path, basename + '_Mean_Spec_' + fig_title + '.png')
path_hist = path.join(folder_path, basename + '_Histograms_' + fig_title + '.png')
plot_1d_spectrum(step_averaged_vec, freq_vec, fig_title, figure_path=path_1d)
plot_2d_spectrogram(mean_spec, freq_vec, fig_title, figure_path=path_2d)
fig_1d, axes_1d = plot_1d_spectrum(step_averaged_vec, freq_vec, fig_title)
fig_1d.savefig(path_1d, format='png', dpi=300)
fig_2d, axes_2d = plot_2d_spectrogram(mean_spec, freq_vec, title=fig_title)
fig_2d.savefig(path_2d, format='png', dpi=300)
if do_histogram:
plot_histgrams(hist_mat, hist_indices, grp.name, figure_path=path_hist)
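For callers outside this module, the two BE plotting helpers now live in be_viz_utils and return the figure and axes instead of accepting a figure_path and saving internally. A small sketch with placeholder complex-valued arrays; argument order and the returned (fig, axes) tuple are inferred from the calls above.

```python
import numpy as np
from pycroscopy.viz.be_viz_utils import plot_1d_spectrum, plot_2d_spectrogram

# Placeholder complex-valued BE data: 87 frequency bins, 32 spectroscopic steps
freq_vec = np.linspace(200e3, 400e3, 87)
step_avg = np.random.rand(87) * np.exp(1j * 2 * np.pi * np.random.rand(87))
mean_spec = np.random.rand(32, 87) * np.exp(1j * 2 * np.pi * np.random.rand(32, 87))

# Both helpers now return (fig, axes); saving is the caller's responsibility:
fig_1d, axes_1d = plot_1d_spectrum(step_avg, freq_vec, 'Step averaged response')
fig_1d.savefig('step_avg.png', format='png', dpi=300)

fig_2d, axes_2d = plot_2d_spectrogram(mean_spec, freq_vec, title='Mean spectrogram')
fig_2d.savefig('mean_spec.png', format='png', dpi=300)
```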
......@@ -560,9 +563,10 @@ def visualize_plot_groups(h5_filepath):
plt_grp = grp[plt_grp_name]
if expt_type == 'BEPSData':
spect_data = plt_grp['Mean_Spectrogram'].value
plot_2d_spectrogram(spect_data, plt_grp['Bin_Frequencies'].value, plt_grp.attrs['Name'])
_ = plot_2d_spectrogram(spect_data, plt_grp['Bin_Frequencies'].value,
title=plt_grp.attrs['Name'])
step_avg_data = plt_grp['Step_Averaged_Response']
plot_1d_spectrum(step_avg_data, plt_grp['Bin_Frequencies'].value, plt_grp.attrs['Name'])
_ = plot_1d_spectrum(step_avg_data, plt_grp['Bin_Frequencies'].value, plt_grp.attrs['Name'])
try:
hist_data = plt_grp['Histograms']
hist_bins = plt_grp['Histograms_Indicies']
......
......@@ -26,7 +26,7 @@ from . import fft
from . import gmode_utils
from . import proc_utils
from . import svd_utils
from .svd_utils import doSVD, rebuild_svd
from .svd_utils import SVD, rebuild_svd
from . import decomposition
from .decomposition import Decomposition
from . import cluster
......@@ -58,5 +58,5 @@ else:
FeatureExtractor = FeatureExtractorParallel
geoTransformer = geoTransformerParallel
__all__ = ['Cluster', 'Decomposition', 'ImageWindow', 'doSVD', 'fft', 'gmode_utils', 'proc_utils', 'svd_utils',
__all__ = ['Cluster', 'Decomposition', 'ImageWindow', 'SVD', 'fft', 'gmode_utils', 'proc_utils', 'svd_utils',
'giv_utils', 'rebuild_svd', 'Process', 'parallel_compute', 'Process', 'GIVBayesian', 'SignalFilter']
......@@ -57,8 +57,11 @@ class GIVBayesian(Process):
# take these from kwargs
bayesian_parms = {'gam': 0.03, 'e': 10.0, 'sigma': 10.0, 'sigmaC': 1.0, 'num_samples': 2E3}
self.parm_dict = {'freq': self.ex_freq, 'num_x_steps': self.num_x_steps, 'r_extra': self.r_extra}
self.parm_dict.update(bayesian_parms)
self.parms_dict = {'freq': self.ex_freq, 'num_x_steps': self.num_x_steps, 'r_extra': self.r_extra}
self.parms_dict.update(bayesian_parms)
self.process_name = 'Bayesian_Inference'
self.duplicate_h5_groups = self._check_for_duplicates()
h5_spec_vals = getAuxData(h5_main, auxDataName=['Spectroscopic_Values'])[0]
self.single_ao = np.squeeze(h5_spec_vals[()])
......@@ -90,7 +93,7 @@ class GIVBayesian(Process):
"""
super(GIVBayesian, self)._set_memory_and_cores(cores=cores, mem=mem)
# Remember that the default number of pixels corresponds to only the raw data that can be held in memory
# In the case of simplified Bayeisan inference, four (roughly) equally sized datasets need to be held in memory:
# In the case of simplified Bayesian inference, four (roughly) equally sized datasets need to be held in memory:
# raw, compensated current, resistance, variance
self._max_pos_per_read = self._max_pos_per_read // 4 # Integer division
# Since these computations take far longer than functional fitting, do in smaller batches:
......@@ -130,12 +133,12 @@ class GIVBayesian(Process):
chunking=(1, self.single_ao.size), compression='gzip')
# don't bother adding any other attributes, all this will be taken from h5_main
bayes_grp = MicroDataGroup(self.h5_main.name.split('/')[-1] + '-Bayesian_Inference_',
bayes_grp = MicroDataGroup(self.h5_main.name.split('/')[-1] + '-' + self.process_name + '_',
parent=self.h5_main.parent.name)
bayes_grp.addChildren([ds_spec_inds, ds_spec_vals, ds_cap, ds_r_var, ds_res, ds_i_corr,
ds_cap_spec_inds, ds_cap_spec_vals])
bayes_grp.attrs = {'algorithm_author': 'Kody J. Law', 'last_pixel': 0}
bayes_grp.attrs.update(self.parm_dict)
bayes_grp.attrs.update(self.parms_dict)
if self.verbose:
bayes_grp.showTree()
......@@ -262,7 +265,7 @@ class GIVBayesian(Process):
half_v_steps = self.single_ao.size // 2
# remove additional parm and halve the x points
bayes_parms = self.parm_dict.copy()
bayes_parms = self.parms_dict.copy()
bayes_parms['num_x_steps'] = self.num_x_steps // 2
bayes_parms['econ'] = True
del(bayes_parms['freq'])
......
......@@ -9,7 +9,7 @@ import numpy as np
import psutil
import joblib
from ..io.hdf_utils import checkIfMain
from ..io.hdf_utils import checkIfMain, check_for_old, get_attributes
from ..io.io_hdf5 import ioHDF5
from ..io.io_utils import recommendCores, getAvailableMem
......@@ -50,13 +50,15 @@ class Process(object):
Encapsulates the typical steps performed when applying a processing function to a dataset.
"""
def __init__(self, h5_main, cores=None, max_mem_mb=4*1024, verbose=False):
def __init__(self, h5_main, h5_results_grp=None, cores=None, max_mem_mb=4*1024, verbose=False):
"""
Parameters
----------
h5_main : h5py.Dataset instance
The dataset over which the analysis will be performed. This dataset should be linked to the spectroscopic
indices and values, and position indices and values datasets.
h5_results_grp : h5py.Datagroup object, optional
Datagroup containing partially computed results
cores : uint, optional
Default - all available cores - 2
How many cores to use for the computation
......@@ -83,11 +85,52 @@ class Process(object):
self._start_pos = 0
self._end_pos = self.h5_main.shape[0]
self._results = None
self.h5_results_grp = None
# Determining the max size of the data that can be put into memory
self._set_memory_and_cores(cores=cores, mem=max_mem_mb)
self.duplicate_h5_groups = []
self.process_name = None # Reset this in the extended classes
self.parms_dict = None
self._results = None
self.h5_results_grp = h5_results_grp
if self.h5_results_grp is not None:
self._extract_params(h5_results_grp)
# DON'T check for duplicates since parms_dict has not yet been initialized.
# Sub classes will check by themselves if they are interested.
def _check_for_duplicates(self):
"""
Checks for instances where the process was applied to the same dataset with the same parameters
Returns
-------
duplicate_h5_groups : list of h5py.Datagroup objects
List of groups satisfying the above conditions
"""
duplicate_h5_groups = check_for_old(self.h5_main, self.process_name, new_parms=self.parms_dict)
if self.verbose:
print('Checking for duplicates:')
if duplicate_h5_groups is not None:
print('WARNING! ' + self.process_name + ' has already been performed with the same parameters before. '
'Consider reusing results')
print(duplicate_h5_groups)
return duplicate_h5_groups
def _extract_params(self, h5_partial_group):
"""
Extracts the necessary parameters from the provided h5 group to resume computation
Parameters
----------
h5_partial_group : h5py.Datagroup object
Datagroup containing partially computed results
"""
self.parms_dict = get_attributes(h5_partial_group)
self._start_pos = self.parms_dict.pop('last_pixel')
if self._start_pos == self.h5_main.shape[0] - 1:
raise ValueError('The last computed pixel shows that the computation was already complete')
def _set_memory_and_cores(self, cores=1, mem=1024):
"""
......@@ -172,6 +215,7 @@ class Process(object):
"""
The purpose of this function is to allow processes to resume from partly computed results
Start with self.h5_results_grp
"""
raise NotImplementedError('Please override the _get_existing_datasets specific to your process')
......@@ -188,9 +232,12 @@ class Process(object):
-------
"""
self._create_results_datasets()
self._start_pos = 0
if self._start_pos == 0:
# starting fresh
self._create_results_datasets()
else:
# resuming from previous checkpoint
self._get_existing_datasets()
self._read_data_chunk()
while self.data is not None:
......
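Taken together, the changes above define the pattern subclasses are expected to follow: set process_name and parms_dict, ask the base class for duplicates, and accept an h5_results_grp to resume a partial computation (the SignalFilter and GIVBayesian changes in this PR do exactly this). A minimal sketch, with a hypothetical subclass name and parameter:

```python
from pycroscopy.processing.process import Process


class MyProcess(Process):
    """Hypothetical subclass illustrating the new duplicate-check / resume hooks."""

    def __init__(self, h5_main, some_parm=1.0, **kwargs):
        # kwargs may include h5_results_grp=... to resume a partial computation
        super(MyProcess, self).__init__(h5_main, **kwargs)
        # Subclasses must set these two before asking for duplicates:
        self.process_name = 'My_Process'
        self.parms_dict = {'some_parm': some_parm}
        # Lists previously written groups with identical parameters, if any
        self.duplicate_h5_groups = self._check_for_duplicates()

    def _create_results_datasets(self):
        pass  # build the '<source>-My_Process_' group, seeding the 'last_pixel' attribute to 0

    def _get_existing_datasets(self):
        pass  # grab dataset handles from self.h5_results_grp when resuming
```

When a partially computed group is passed in, _extract_params pulls the stored parameters and the 'last_pixel' checkpoint from its attributes, so the main loop (presumably compute(), per the hunk above) skips _create_results_datasets and calls _get_existing_datasets instead.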
......@@ -106,12 +106,8 @@ class SignalFilter(Process):
self.parms_dict['noise_threshold'] = self.noise_threshold
self.parms_dict['num_pix'] = self.num_effective_pix
duplicates = check_for_old(self.h5_main, 'FFT_Filtering', new_parms=self.parms_dict)
if self.verbose:
print('Checking for duplicates:')
print(duplicates)
if duplicates is not None:
print('WARNING! FFT filtering has already been performed with the same parameters before. Consider reusing results')
self.process_name = 'FFT_Filtering'
self.duplicate_h5_groups = self._check_for_duplicates()
self.data = None
self.filtered_data = None
......@@ -126,7 +122,7 @@ class SignalFilter(Process):
Creates all the datasets necessary for holding all parameters + data.
"""
grp_name = self.h5_main.name.split('/')[-1] + '-FFT_Filtering_'
grp_name = self.h5_main.name.split('/')[-1] + '-' + self.process_name + '_'
grp_filt = MicroDataGroup(grp_name, self.h5_main.parent.name)
self.parms_dict.update({'last_pixel': 0, 'algorithm': 'pycroscopy_SignalFilter'})
......
......@@ -13,159 +13,169 @@ import numpy as np
from sklearn.utils import gen_batches
from sklearn.utils.extmath import randomized_svd
from ..io.hdf_utils import getH5DsetRefs, checkAndLinkAncillary, findH5group, create_empty_dataset, \
from .process import Process
from ..io.hdf_utils import getH5DsetRefs, checkAndLinkAncillary, findH5group, \
getH5RegRefIndices, createRefFromIndices, checkIfMain, calc_chunks, copy_main_attributes, copyAttributes
from ..io.io_hdf5 import ioHDF5
from ..io.io_utils import check_dtype, transformToTargetType, getAvailableMem
from ..io.microdata import MicroDataset, MicroDataGroup
def doSVD(h5_main, num_comps=None):
"""
Does SVD on the provided dataset and writes the result. File is not closed
Parameters
----------
h5_main : h5py.Dataset reference
Reference to the dataset on which SVD will be performed
num_comps : Unsigned integer (Optional)
Number of principal components of interest
Returns
-------
h5_pca : h5py.Datagroup reference
Reference to the group containing the PCA results
"""
if not checkIfMain(h5_main):
warn('Dataset does not meet requirements for performing PCA.')
return
dset_name = h5_main.name.split('/')[-1]
t1 = time.time()
class SVD(Process):
'''
Calculate the size of the main data in memory and compare to max_mem
We use the minimum of the actual dtype's itemsize and float32 since we
don't want to read it in yet and do the proper type conversions.
'''
func, is_complex, is_compound, n_features, n_samples, type_mult = check_dtype(h5_main)
if num_comps is None:
num_comps = min(n_samples, n_features)
else:
num_comps = min(n_samples, n_features, num_comps)
def __init__(self, h5_main, num_components=None):
'''
Check if a number of compnents has been set and ensure that the number is less than
the minimum axis length of the data. If both conditions are met, use fsvd. If not
use the regular svd.
C.Smith -- We might need to put a lower limit on num_comps in the future. I don't
know enough about svd to be sure.
'''
print('Performing SVD decomposition')
super(SVD, self).__init__(h5_main)
self.process_name = 'SVD'
U, S, V = randomized_svd(func(h5_main), num_comps, n_iter=3)
svd_type = 'sklearn-randomized'
print('SVD took {} seconds. Writing results to file.'.format(round(time.time() - t1, 2)))
'''
Create datasets for V and S, deleting original arrays afterward to save
memory.
'''
ds_S = MicroDataset('S', data=np.float32(S))
ds_S.attrs['labels'] = {'Principal Component': [slice(0, None)]}
ds_S.attrs['units'] = ''
ds_inds = MicroDataset('Component_Indices', data=np.uint32(np.arange(len(S))))
ds_inds.attrs['labels'] = {'Principal Component': [slice(0, None)]}
ds_inds.attrs['units'] = ''
del S
u_chunks = calc_chunks(U.shape, np.float32(0).itemsize)
ds_U = MicroDataset('U', data=np.float32(U), chunking=u_chunks)
del U
V = transformToTargetType(V, h5_main.dtype)
v_chunks = calc_chunks(V.shape, h5_main.dtype.itemsize)
ds_V = MicroDataset('V', data=V, chunking=v_chunks)
del V
'''
Create the Group to hold the results and add the existing datasets as
children
'''
grp_name = dset_name + '-SVD_'
svd_grp = MicroDataGroup(grp_name, h5_main.parent.name[1:])
svd_grp.addChildren([ds_V, ds_S, ds_U, ds_inds])
'''
Write the attributes to the group
'''
svd_grp.attrs['num_components'] = num_comps
svd_grp.attrs['svd_method'] = svd_type
'''
Write the data and retrieve the HDF5 objects then delete the Microdatasets
'''
hdf = ioHDF5(h5_main.file)
h5_svd_refs = hdf.writeData(svd_grp)
h5_U = getH5DsetRefs(['U'], h5_svd_refs)[0]
h5_S = getH5DsetRefs(['S'], h5_svd_refs)[0]
h5_V = getH5DsetRefs(['V'], h5_svd_refs)[0]
h5_svd_inds = getH5DsetRefs(['Component_Indices'], h5_svd_refs)[0]
h5_svd_grp = h5_S.parent
# copy attributes
copy_main_attributes(h5_main, h5_V)
h5_V.attrs['units'] = np.array(['a. u.'], dtype='S')
del ds_S, ds_V, ds_U, svd_grp
# Will attempt to see if there is anything linked to this dataset.
# Since I was meticulous about the translators that I wrote, I know I will find something here
checkAndLinkAncillary(h5_U,
['Position_Indices', 'Position_Values'],
h5_main=h5_main)
checkAndLinkAncillary(h5_V,
['Position_Indices', 'Position_Values'],
anc_refs=[h5_svd_inds, h5_S])
checkAndLinkAncillary(h5_U,
['Spectroscopic_Indices', 'Spectroscopic_Values'],
anc_refs=[h5_svd_inds, h5_S])
checkAndLinkAncillary(h5_V,
['Spectroscopic_Indices', 'Spectroscopic_Values'],
h5_main=h5_main)
'''
Check h5_main for plot group references.
Copy them into V if they exist
'''
for key in h5_main.attrs.keys():
if '_Plot_Group' not in key:
continue
ref_inds = getH5RegRefIndices(h5_main.attrs[key], h5_main, return_method='corners')
ref_inds = ref_inds.reshape([-1, 2, 2])
ref_inds[:, 1, 0] = h5_V.shape[0] - 1
svd_ref = createRefFromIndices(h5_V, ref_inds)
h5_V.attrs[key] = svd_ref
return h5_svd_grp
'''
Calculate the size of the main data in memory and compare to max_mem
We use the minimum of the actual dtype's itemsize and float32 since we
don't want to read it in yet and do the proper type conversions.
'''
self.data_transform_func, is_complex, is_compound, n_features, n_samples, type_mult = check_dtype(h5_main)
if num_components is None:
num_components = min(n_samples, n_features)
else:
num_components = min(n_samples, n_features, num_components)
self.num_components = num_components
self.parms_dict = {'num_components': num_components}
self.duplicate_h5_groups = self._check_for_duplicates()
def compute(self):
"""
Computes SVD and writes results to file
Returns
-------
h5_results_grp : h5py.Datagroup object
Datagroup containing all the results
"""
'''
Check if a number of components has been set and ensure that the number is less than
the minimum axis length of the data. If both conditions are met, use fsvd. If not
use the regular svd.
C.Smith -- We might need to put a lower limit on num_comps in the future. I don't
know enough about svd to be sure.
'''
print('Performing SVD decomposition')
t1 = time.time()
U, S, V = randomized_svd(self.data_transform_func(self.h5_main), self.num_components, n_iter=3)
print('SVD took {} seconds. Writing results to file.'.format(round(time.time() - t1, 2)))
self._write_results_chunk(U, S, V)
del U, S, V
return self.h5_results_grp
def _write_results_chunk(self, U, S, V):
"""
Writes the provided SVD results to file
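To close, a hedged usage sketch of the refactored API, mirroring the doSVD call it replaces. Here h5_main is a placeholder for an open main dataset, the num_components name comes from the new __init__ signature above, and px.processing.SVD assumes the processing subpackage is exposed at the top level as in the __all__ change earlier in this diff.

```python
import pycroscopy as px

# Old, function-based API removed by this PR:
# h5_svd_grp = px.processing.doSVD(h5_main, num_comps=25)

# New, Process-based API:
svd_proc = px.processing.SVD(h5_main, num_components=25)

# Any earlier runs with identical parameters are reported before recomputing:
if svd_proc.duplicate_h5_groups is not None:
    print('SVD with these parameters already exists:', svd_proc.duplicate_h5_groups)

h5_svd_grp = svd_proc.compute()  # returns the h5 group holding the SVD results
```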