Commit 2c5dcc9f authored by ssomnath

Merge branch 'mpi_dev_local'

parents 2c31bfa1 67e49c13
......@@ -10,13 +10,6 @@ Submodules
core
"""
import pyUSID as core
from pyUSID.viz import *
from pyUSID.processing import *
from pyUSID.io import *
# For legacy reasons:
from pyUSID import USIDataset as PycroDataset
from warnings import warn
from .io import translators
from .io.translators import ImageTranslator # Use pycroscopy version of ImageTranslator rather than pyUSID's
from . import analysis
......@@ -25,10 +18,3 @@ from . import viz
from .__version__ import version as __version__
from .__version__ import time as __time__
warn('Contents of pycroscopy.core such as hdf_utils, plot_utils have been moved to pyUSID but will continue to be '
'available implicitly until the next release. Please update import statements to import such modules directly '
'from pyUSID. See - https://pycroscopy.github.io/pycroscopy/whats_new.html under June 28 2018', FutureWarning)
__all__ = core.__all__
__all__ += ['PycroDataset']
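The warning above asks downstream code to stop relying on the implicit pycroscopy.core re-exports. A minimal sketch of the requested migration; the specific functions are only illustrative:

```python
# Before (deprecated; only available implicitly until the next release):
from pycroscopy.core.io.hdf_utils import get_attr
from pycroscopy.core.viz.plot_utils import plot_map

# After (import directly from pyUSID, as the FutureWarning requests):
from pyUSID.io.hdf_utils import get_attr
from pyUSID.viz.plot_utils import plot_map
```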
......@@ -13,7 +13,8 @@ import time as tm
from .guess_methods import GuessMethods
from .fit_methods import Fit_Methods
from pyUSID import USIDataset
from pyUSID.io.io_utils import get_available_memory, recommend_cpu_cores, format_time
from pyUSID.processing.comp_utils import get_available_memory, recommend_cpu_cores
from pyUSID.io.io_utils import format_time
from pyUSID.io.hdf_utils import check_for_old, find_results_groups, check_for_matching_attrs, get_attr
from .optimize import Optimize
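A recurring change in this commit: the compute-resource helpers now live in pyUSID.processing.comp_utils, while format_time stays in pyUSID.io.io_utils. The new import pattern, shown once here:

```python
from pyUSID.processing.comp_utils import get_available_memory, recommend_cpu_cores, parallel_compute
from pyUSID.io.io_utils import format_time
```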
......
......@@ -9,7 +9,8 @@ Created on Thu Nov 02 11:48:53 2017
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
from pyUSID.processing.process import Process, parallel_compute
from pyUSID.processing.process import Process
from pyUSID.processing.comp_utils import parallel_compute
from pyUSID.io.dtype_utils import stack_real_to_compound
from pyUSID.io.hdf_utils import write_main_dataset, create_results_group, create_empty_dataset, write_simple_attrs, \
print_tree, get_attributes
......@@ -50,7 +51,7 @@ class GIVBayesian(Process):
self.num_x_steps = int(num_x_steps)
if self.num_x_steps % 4 == 0:
self.num_x_steps = ((self.num_x_steps // 2) + 1) * 2
if self.verbose:
if self.verbose and self.mpi_rank == 0:
print('ensuring that half of num_x_steps is odd; num_x_steps is now', self.num_x_steps)
self.h5_main = USIDataset(self.h5_main)
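For concreteness, the adjustment above takes a step count that is a multiple of 4 and bumps it to the next even value whose half is odd (values below are only illustrative):

```python
num_x_steps = 100                           # 100 % 4 == 0, so half (50) would be even
num_x_steps = ((num_x_steps // 2) + 1) * 2  # -> 102; half (51) is now odd
```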
......@@ -80,6 +81,8 @@ class GIVBayesian(Process):
self.forward_results = None
self._bayes_parms = None
self.__first_batch = True
def test(self, pix_ind=None, show_plots=True):
"""
Tests the inference on a single pixel (randomly chosen unless manually specified) worth of data.
......@@ -95,6 +98,9 @@ class GIVBayesian(Process):
-------
fig, axes
"""
if self.mpi_rank > 0:
return
if pix_ind is None:
pix_ind = np.random.randint(0, high=self.h5_main.shape[0])
other_params = self.parms_dict.copy()
......@@ -104,7 +110,7 @@ class GIVBayesian(Process):
return bayesian_inference_on_period(self.h5_main[pix_ind], self.single_ao, self.parms_dict['freq'],
show_plots=show_plots, **other_params)
def _set_memory_and_cores(self, cores=1, mem=1024):
def _set_memory_and_cores(self, cores=None, mem=None):
"""
Checks hardware limitations such as memory and the number of CPUs, and sets the recommended data chunk sizes and the
number of cores to be used by analysis methods.
......@@ -124,8 +130,9 @@ class GIVBayesian(Process):
# raw, compensated current, resistance, variance
self._max_pos_per_read = self._max_pos_per_read // 4 # Integer division
# Since these computations take far longer than functional fitting, do in smaller batches:
self._max_pos_per_read = min(100, self._max_pos_per_read)
if self.verbose:
self._max_pos_per_read = min(1000, self._max_pos_per_read)
if self.verbose and self.mpi_rank == 0:
print('Max positions per read set to {}'.format(self._max_pos_per_read))
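The division by 4 above reserves room for the four equally sized per-position arrays named in the comment (raw, compensated current, resistance, variance), and the min() caps how many positions are read per batch. A sketch with made-up numbers:

```python
max_pos_per_read = 14000                        # e.g. what the memory check recommends
max_pos_per_read //= 4                          # 3500: four arrays per position
max_pos_per_read = min(1000, max_pos_per_read)  # 1000: keep Bayesian batches small
```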
def _create_results_datasets(self):
......@@ -135,35 +142,36 @@ class GIVBayesian(Process):
# create all h5 datasets here:
num_pos = self.h5_main.shape[0]
if self.verbose:
if self.verbose and self.mpi_rank == 0:
print('Now creating the datasets')
h5_group = create_results_group(self.h5_main, self.process_name)
self.h5_results_grp = h5_group
write_simple_attrs(h5_group, {'algorithm_author': 'Kody J. Law', 'last_pixel': 0})
write_simple_attrs(h5_group, self.parms_dict)
self.h5_results_grp = create_results_group(self.h5_main, self.process_name)
if self.verbose:
print('created group: {}'.format(h5_group.name))
print(get_attributes(h5_group))
write_simple_attrs(self.h5_results_grp, {'algorithm_author': 'Kody J. Law', 'last_pixel': 0})
write_simple_attrs(self.h5_results_grp, self.parms_dict)
if self.verbose and self.mpi_rank == 0:
print('created group: {} with attributes:'.format(self.h5_results_grp.name))
print(get_attributes(self.h5_results_grp))
# One of those rare instances when the result is exactly the same as the source
self.h5_i_corrected = create_empty_dataset(self.h5_main, np.float32, 'Corrected_Current', h5_group=h5_group)
self.h5_i_corrected = create_empty_dataset(self.h5_main, np.float32, 'Corrected_Current', h5_group=self.h5_results_grp)
if self.verbose:
if self.verbose and self.mpi_rank == 0:
print('Created I Corrected')
print_tree(h5_group)
# print_tree(self.h5_results_grp)
# For some reason, we cannot specify chunks or compression (possibly a limitation of parallel HDF5 writes)!
# The resistance dataset requires the creation of a new spectroscopic dimension
self.h5_resistance = write_main_dataset(h5_group, (num_pos, self.num_x_steps), 'Resistance', 'Resistance',
self.h5_resistance = write_main_dataset(self.h5_results_grp, (num_pos, self.num_x_steps), 'Resistance', 'Resistance',
'GOhms', None, Dimension('Bias', 'V', self.num_x_steps),
dtype=np.float32, chunks=(1, self.num_x_steps), compression='gzip',
dtype=np.float32, # chunks=(1, self.num_x_steps), #compression='gzip',
h5_pos_inds=self.h5_main.h5_pos_inds,
h5_pos_vals=self.h5_main.h5_pos_vals)
if self.verbose:
if self.verbose and self.mpi_rank == 0:
print('Created Resistance')
print_tree(h5_group)
# print_tree(self.h5_results_grp)
assert isinstance(self.h5_resistance, USIDataset) # only here for PyCharm
self.h5_new_spec_vals = self.h5_resistance.h5_spec_vals
......@@ -171,21 +179,23 @@ class GIVBayesian(Process):
# The variance is identical to the resistance dataset
self.h5_variance = create_empty_dataset(self.h5_resistance, np.float32, 'R_variance')
if self.verbose:
if self.verbose and self.mpi_rank == 0:
print('Created Variance')
print_tree(h5_group)
# print_tree(self.h5_results_grp)
# The capacitance dataset requires new spectroscopic dimensions as well
self.h5_cap = write_main_dataset(h5_group, (num_pos, 1), 'Capacitance', 'Capacitance', 'pF', None,
self.h5_cap = write_main_dataset(self.h5_results_grp, (num_pos, 1), 'Capacitance', 'Capacitance', 'pF', None,
Dimension('Direction', '', [1]), h5_pos_inds=self.h5_main.h5_pos_inds,
h5_pos_vals=self.h5_main.h5_pos_vals, dtype=cap_dtype, compression='gzip',
h5_pos_vals=self.h5_main.h5_pos_vals, dtype=cap_dtype, #compression='gzip',
aux_spec_prefix='Cap_Spec_')
if self.verbose:
if self.verbose and self.mpi_rank == 0:
print('Created Capacitance')
print_tree(h5_group)
print('Done!')
# print_tree(self.h5_results_grp)
print('Done creating all results datasets!')
if self.mpi_size > 1:
self.mpi_comm.Barrier()
self.h5_main.file.flush()
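The Barrier/flush above makes every rank wait until the results group and datasets exist before any rank starts writing its chunk. A minimal sketch of that hand-off outside pyUSID, assuming an MPI-enabled (parallel) h5py build; the file and dataset names are hypothetical:

```python
from mpi4py import MPI
import h5py
import numpy as np

comm = MPI.COMM_WORLD

# Dataset creation is collective: every rank must execute the same calls
with h5py.File('giv_results.h5', 'a', driver='mpio', comm=comm) as h5_f:
    grp = h5_f.require_group('Raw_Data-Bayesian_Inference_000')
    dset = grp.require_dataset('Corrected_Current', shape=(4096, 500), dtype=np.float32)
    comm.Barrier()   # all ranks now see the complete structure
    h5_f.flush()
    # ... each rank would then write only its own subset of positions ...
```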
def _get_existing_datasets(self):
......@@ -204,7 +214,8 @@ class GIVBayesian(Process):
"""
if self.verbose:
print('Started accumulating all results')
print('Rank {} - Started accumulating results for this chunk'.format(self.mpi_rank))
num_pixels = len(self.forward_results)
cap_mat = np.zeros((num_pixels, 2), dtype=np.float32)
r_inf_mat = np.zeros((num_pixels, self.num_x_steps), dtype=np.float32)
......@@ -245,26 +256,19 @@ class GIVBayesian(Process):
# Now write to h5 files:
if self.verbose:
print('Finished accumulating results. Writing to h5')
print('Rank {} - Finished accumulating results. Writing results of chunk to h5'.format(self.mpi_rank))
if self._start_pos == 0:
if self.__first_batch:
self.h5_new_spec_vals[0, :] = full_results['x'] # Technically this needs to only be done once
self.__first_batch = False
pos_slice = slice(self._start_pos, self._end_pos)
self.h5_cap[pos_slice] = np.atleast_2d(stack_real_to_compound(cap_mat, cap_dtype)).T
self.h5_variance[pos_slice] = r_var_mat
self.h5_resistance[pos_slice] = r_inf_mat
self.h5_i_corrected[pos_slice] = i_cor_sin_mat
# Get access to the private variable:
pos_in_batch = self._get_pixels_in_current_batch()
# Leaving in this provision that will allow restarting of processes
self.h5_results_grp.attrs['last_pixel'] = self._end_pos
self.h5_main.file.flush()
print('Finished processing up to pixel ' + str(self._end_pos) + ' of ' + str(self.h5_main.shape[0]))
# Now update the start position
self._start_pos = self._end_pos
self.h5_cap[pos_in_batch, :] = np.atleast_2d(stack_real_to_compound(cap_mat, cap_dtype)).T
self.h5_variance[pos_in_batch, :] = r_var_mat
self.h5_resistance[pos_in_batch, :] = r_inf_mat
self.h5_i_corrected[pos_in_batch, :] = i_cor_sin_mat
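Unlike the old slice(self._start_pos, self._end_pos) writes removed above, the pos_in_batch writes address an explicit list of positions, so they also work when a rank owns a non-contiguous set of pixels. A small NumPy-only illustration (array shapes are made up):

```python
import numpy as np

full = np.zeros((50, 5), dtype=np.float32)     # stand-in for an h5 results dataset
pos_in_batch = np.array([7, 12, 40])           # pixels assigned to this rank/batch
r_inf_mat = np.ones((3, 5), dtype=np.float32)  # results for those 3 pixels

full[pos_in_batch, :] = r_inf_mat              # same pattern as the h5 writes above
```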
def _unit_computation(self, *args, **kwargs):
"""
......@@ -282,22 +286,24 @@ class GIVBayesian(Process):
# first roll the data
rolled_raw_data = np.roll(self.data, self.roll_pts, axis=1)
# Ensure that the bias has a positive slope. Multiply current by -1 accordingly
if self.verbose:
print('Rank {} beginning parallel compute for Forward'.format(self.mpi_rank))
self.reverse_results = parallel_compute(rolled_raw_data[:, :half_v_steps] * -1, do_bayesian_inference,
cores=self._cores,
func_args=[self.rolled_bias[:half_v_steps] * -1, self.ex_freq],
func_kwargs=self._bayes_parms, lengthy_computation=True,
func_kwargs=self._bayes_parms, lengthy_computation=False,
verbose=self.verbose)
if self.verbose:
print('Finished processing forward sections. Now working on reverse sections....')
print('Rank {} finished processing forward sections. Now working on reverse sections'.format(self.mpi_rank))
self.forward_results = parallel_compute(rolled_raw_data[:, half_v_steps:], do_bayesian_inference,
cores=self._cores,
func_args=[self.rolled_bias[half_v_steps:], self.ex_freq],
func_kwargs=self._bayes_parms, lengthy_computation=True,
func_kwargs=self._bayes_parms, lengthy_computation=False,
verbose=self.verbose)
if self.verbose:
print('Finished processing reverse loops')
print('Rank {} finished processing reverse loops (and this chunk)'.format(self.mpi_rank))
def compute(self, override=False, *args, **kwargs):
"""
......
......@@ -17,7 +17,7 @@ from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from pyUSID.io.io_utils import recommend_cpu_cores
from pyUSID.processing.comp_utils import recommend_cpu_cores
from pyUSID.io.dtype_utils import stack_real_to_compound
from ...io.virtual_data import VirtualDataset, VirtualGroup
from ...io.hdf_writer import HDFwriter
......
......@@ -11,7 +11,7 @@ import multiprocessing as mp
import time as tm
import matplotlib.pyplot as plt
from pyUSID.io.io_utils import recommend_cpu_cores
from pyUSID.processing.comp_utils import recommend_cpu_cores
from ...io.virtual_data import VirtualDataset, VirtualGroup
from ...io.hdf_writer import HDFwriter
from pyUSID.viz.plot_utils import cmap_jet_white_center
......
......@@ -21,7 +21,7 @@ from pyUSID.io.hdf_utils import write_ind_val_dsets, write_main_dataset, write_r
create_indexed_group, write_simple_attrs, write_book_keeping_attrs, copy_attributes,\
write_reduced_spec_dsets
from pyUSID.io.usi_data import USIDataset
from pyUSID.io.io_utils import get_available_memory
from pyUSID.processing.comp_utils import get_available_memory
class BEodfTranslator(Translator):
"""
......
......@@ -18,7 +18,7 @@ import xlrd as xlreader
from pyUSID.io.hdf_utils import get_auxiliary_datasets, find_dataset, get_h5_obj_refs, link_h5_objects_as_attrs, \
get_attr, create_indexed_group, write_simple_attrs, write_main_dataset, Dimension
from pyUSID.io.write_utils import create_spec_inds_from_vals
from pyUSID.io.io_utils import get_available_memory, recommend_cpu_cores
from pyUSID.processing.comp_utils import get_available_memory, recommend_cpu_cores
from ....analysis.optimize import Optimize
from ....processing.histogram import build_histogram
from ....viz.be_viz_utils import plot_1d_spectrum, plot_2d_spectrogram, plot_histograms
......
......@@ -12,7 +12,8 @@ import sklearn.cluster as cls
from scipy.cluster.hierarchy import linkage
from scipy.spatial.distance import pdist
from .proc_utils import get_component_slice
from pyUSID.processing.process import Process, parallel_compute
from pyUSID.processing.process import Process
from pyUSID.processing.comp_utils import parallel_compute
from pyUSID.io.hdf_utils import reshape_to_n_dims, create_results_group, write_main_dataset, get_attr, \
write_simple_attrs, link_h5_obj_as_alias, write_ind_val_dsets
from pyUSID import USIDataset
......@@ -264,7 +265,6 @@ class Cluster(Process):
h5_cluster_group = create_results_group(self.h5_main, self.process_name)
write_simple_attrs(h5_cluster_group, self.parms_dict)
h5_cluster_group.attrs['last_pixel'] = self.h5_main.shape[0]
h5_labels = write_main_dataset(h5_cluster_group, np.uint32(self.__labels.reshape([-1, 1])), 'Labels',
'Cluster ID', 'a. u.', None, Dimension('Cluster', 'ID', 1),
......@@ -305,6 +305,13 @@ class Cluster(Process):
h5_spec_inds=h5_inds, aux_pos_prefix='Mean_Resp_Pos_',
h5_spec_vals=h5_vals)
# Marking completion:
self._status_dset_name = 'completed_positions'
self._h5_status_dset = h5_cluster_group.create_dataset(self._status_dset_name,
data=np.ones(self.h5_main.shape[0], dtype=np.uint8))
# keeping legacy option:
h5_cluster_group.attrs['last_pixel'] = self.h5_main.shape[0]
return h5_cluster_group
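Cluster runs in one shot, so it marks every position as complete up front; the completed_positions flags are what a restarted Process would consult to decide what is left to do. A sketch of that check (the helper below is hypothetical, not part of the pyUSID API):

```python
import numpy as np

def remaining_positions(h5_status_dset):
    """Indices whose uint8 completion flag is still 0 (not yet computed)."""
    status = h5_status_dset[()]
    return np.where(status == 0)[0]

# For the dataset written above (all ones), this returns an empty array,
# i.e. nothing needs to be recomputed on restart.
```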
......
......@@ -208,7 +208,7 @@ class Decomposition(Process):
h5_decomp_group = create_results_group(self.h5_main, self.process_name)
write_simple_attrs(h5_decomp_group, self.parms_dict)
write_simple_attrs(h5_decomp_group, {'n_components': self.__components.shape[0],
'n_samples': self.h5_main.shape[0], 'last_pixel': self.h5_main.shape[0]})
'n_samples': self.h5_main.shape[0]})
decomp_desc = Dimension('Endmember', 'a. u.', self.__components.shape[0])
......@@ -226,4 +226,12 @@ class Decomposition(Process):
# return the h5 group object
self.h5_results_grp = h5_decomp_group
# Marking completion:
self._status_dset_name = 'completed_positions'
self._h5_status_dset = h5_decomp_group.create_dataset(self._status_dset_name,
data=np.ones(self.h5_main.shape[0], dtype=np.uint8))
# keeping legacy option:
h5_decomp_group.attrs['last_pixel'] = self.h5_main.shape[0]
return self.h5_results_grp
......@@ -17,7 +17,7 @@ from sklearn.utils import gen_batches
from pyUSID import USIDataset
from pyUSID.io.hdf_utils import get_h5_obj_refs, copy_attributes, link_h5_objects_as_attrs, find_results_groups, \
link_as_main, check_for_old
from pyUSID.io.io_utils import get_available_memory
from pyUSID.processing.comp_utils import get_available_memory
from pyUSID.io.write_utils import make_indices_matrix, get_aux_dset_slicing, INDICES_DTYPE, VALUES_DTYPE, calc_chunks
from ..io.hdf_writer import HDFwriter
from ..io.virtual_data import VirtualGroup, VirtualDataset
......
......@@ -10,7 +10,8 @@ from __future__ import division, print_function, absolute_import, unicode_litera
import h5py
import numpy as np
from collections import Iterable
from pyUSID.processing.process import Process, parallel_compute
from pyUSID.processing.process import Process
from pyUSID.processing.comp_utils import parallel_compute
from pyUSID.io.hdf_utils import create_results_group, write_main_dataset, write_simple_attrs, create_empty_dataset, \
write_ind_val_dsets
from pyUSID.io.write_utils import Dimension
......@@ -25,7 +26,6 @@ class SignalFilter(Process):
write_condensed=False, num_pix=1, phase_rad=0, **kwargs):
"""
Filters the entire h5 dataset with the given filtering parameters.
Parameters
----------
h5_main : h5py.Dataset object
......@@ -94,7 +94,7 @@ class SignalFilter(Process):
scaling_factor = 1 + 2 * self.write_filtered + 0.25 * self.write_condensed
self._max_pos_per_read = int(self._max_pos_per_read / scaling_factor)
if self.verbose:
if self.verbose and self.mpi_rank == 0:
print('Allowed to read {} pixels per chunk'.format(self._max_pos_per_read))
self.parms_dict = dict()
......@@ -119,7 +119,6 @@ class SignalFilter(Process):
def test(self, pix_ind=None, excit_wfm=None, **kwargs):
"""
Tests the signal filter on a single pixel (randomly chosen unless manually specified) worth of data.
Parameters
----------
pix_ind : int, optional. default = random
......@@ -129,11 +128,12 @@ class SignalFilter(Process):
length of a single pixel's data. For example, in the case of G-mode, where a single scan line is yet to be
broken down into pixels, the excitation waveform for a single pixel can be provided to automatically
break the raw and filtered responses into chunks of the same size.
Returns
-------
fig, axes
"""
if self.mpi_rank > 0:
return
if pix_ind is None:
pix_ind = np.random.randint(0, high=self.h5_main.shape[0])
return test_filter(self.h5_main[pix_ind], frequency_filters=self.frequency_filters, excit_wfm=excit_wfm,
......@@ -148,6 +148,7 @@ class SignalFilter(Process):
self.h5_results_grp = create_results_group(self.h5_main, self.process_name)
self.parms_dict.update({'last_pixel': 0, 'algorithm': 'pycroscopy_SignalFilter'})
write_simple_attrs(self.h5_results_grp, self.parms_dict)
assert isinstance(self.h5_results_grp, h5py.Group)
......@@ -156,6 +157,9 @@ class SignalFilter(Process):
h5_comp_filt = self.h5_results_grp.create_dataset('Composite_Filter',
data=np.float32(self.composite_filter))
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Finished creating the Composite_Filter dataset'.format(self.mpi_rank))
# First create the position datasets if the new indices are smaller...
if self.num_effective_pix != self.h5_main.shape[0]:
# TODO: Do this part correctly. See past solution:
......@@ -169,26 +173,36 @@ class SignalFilter(Process):
pos_descriptor.append(Dimension(name, units, np.arange(leng)))
ds_pos_inds, ds_pos_vals = build_ind_val_dsets(pos_descriptor, is_spectral=False, verbose=self.verbose)
h5_pos_vals.data = np.atleast_2d(new_pos_vals) # The data generated above varies linearly. Override.
"""
h5_pos_inds_new, h5_pos_vals_new = write_ind_val_dsets(self.h5_results_grp,
Dimension('pixel', 'a.u.', self.num_effective_pix),
is_spectral=False, verbose=self.verbose)
is_spectral=False, verbose=self.verbose and self.mpi_rank==0)
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Created the new position ancillary dataset'.format(self.mpi_rank))
else:
h5_pos_inds_new = self.h5_main.h5_pos_inds
h5_pos_vals_new = self.h5_main.h5_pos_vals
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Reusing position datasets from the source dataset'.format(self.mpi_rank))
if self.noise_threshold is not None:
self.h5_noise_floors = write_main_dataset(self.h5_results_grp, (self.num_effective_pix, 1), 'Noise_Floors',
'Noise', 'a.u.', None, Dimension('arb', '', [1]),
dtype=np.float32, aux_spec_prefix='Noise_Spec_',
h5_pos_inds=h5_pos_inds_new, h5_pos_vals=h5_pos_vals_new,
verbose=self.verbose)
verbose=self.verbose and self.mpi_rank == 0)
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Finished creating the Noise_Floors dataset'.format(self.mpi_rank))
if self.write_filtered:
# Filtered data is identical to Main_Data in every way - just a duplicate
self.h5_filtered = create_empty_dataset(self.h5_main, self.h5_main.dtype, 'Filtered_Data',
h5_group=self.h5_results_grp)
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Finished creating the Filtered dataset'.format(self.mpi_rank))
self.hot_inds = None
......@@ -199,7 +213,13 @@ class SignalFilter(Process):
self.h5_condensed = write_main_dataset(self.h5_results_grp, (self.num_effective_pix, len(self.hot_inds)),
'Condensed_Data', 'Complex', 'a. u.', None, condensed_spec,
h5_pos_inds=h5_pos_inds_new, h5_pos_vals=h5_pos_vals_new,
dtype=np.complex, verbose=self.verbose)
dtype=np.complex, verbose=self.verbose and self.mpi_rank == 0)
if self.verbose and self.mpi_rank == 0:
print('Rank {} - Finished creating the Condensed dataset'.format(self.mpi_rank))
if self.mpi_size > 1:
self.mpi_comm.Barrier()
self.h5_main.file.flush()
def _get_existing_datasets(self):
"""
......@@ -216,30 +236,21 @@ class SignalFilter(Process):
"""
Writes data chunks back to the file
"""
pos_slice = slice(self._start_pos, self._end_pos)
# Get access to the private variable:
pos_in_batch = self._get_pixels_in_current_batch()
if self.write_condensed:
self.h5_condensed[pos_slice] = self.condensed_data
self.h5_condensed[pos_in_batch, :] = self.condensed_data
if self.noise_threshold is not None:
self.h5_noise_floors[pos_slice] = np.atleast_2d(self.noise_floors)
self.h5_noise_floors[pos_in_batch, :] = np.atleast_2d(self.noise_floors)
if self.write_filtered:
self.h5_filtered[pos_slice] = self.filtered_data
# Leaving in this provision that will allow restarting of processes
self.h5_results_grp.attrs['last_pixel'] = self._end_pos
self.h5_main.file.flush()
print('Finished processing upto pixel ' + str(self._end_pos) + ' of ' + str(self.h5_main.shape[0]))
self.h5_filtered[pos_in_batch, :] = self.filtered_data
# Now update the start position
self._start_pos = self._end_pos
# Not responsible for checkpointing anymore. Process class handles this.
def _unit_computation(self, *args, **kwargs):
"""
Processing per chunk of the dataset
Parameters
----------
args : list
......@@ -275,3 +286,4 @@ class SignalFilter(Process):
# do np.roll on data
# self.data = np.roll(self.data, 0, axis=1)
pass
......@@ -17,7 +17,8 @@ from .proc_utils import get_component_slice
from pyUSID.io.hdf_utils import find_results_groups, get_indices_for_region_ref, \
create_region_reference, copy_attributes, reshape_to_n_dims, get_attr, write_main_dataset, \
create_results_group, write_simple_attrs, create_indexed_group
from pyUSID.io.io_utils import get_available_memory, format_time
from pyUSID.processing.comp_utils import get_available_memory
from pyUSID.io.io_utils import format_time
from pyUSID.io.dtype_utils import check_dtype, stack_real_to_target_dtype
from pyUSID.io.write_utils import Dimension, calc_chunks
from pyUSID import USIDataset
......@@ -176,7 +177,7 @@ class SVD(Process):
self.h5_results_grp = h5_svd_group
write_simple_attrs(h5_svd_group, self.parms_dict)
write_simple_attrs(h5_svd_group, {'svd_method': 'sklearn-randomized', 'last_pixel': self.h5_main.shape[0]})
write_simple_attrs(h5_svd_group, {'svd_method': 'sklearn-randomized'})
h5_u = write_main_dataset(h5_svd_group, np.float32(self.__u), 'U', 'Abundance', 'a.u.', None, comp_dim,
h5_pos_inds=self.h5_main.h5_pos_inds, h5_pos_vals=self.h5_main.h5_pos_vals,
......@@ -206,6 +207,13 @@ class SVD(Process):
h5_v.attrs[key] = svd_ref
# Marking completion:
self._status_dset_name = 'completed_positions'
self._h5_status_dset = h5_svd_group.create_dataset(self._status_dset_name,
data=np.ones(self.h5_main.shape[0], dtype=np.uint8))
# keeping legacy option:
h5_svd_group.attrs['last_pixel'] = self.h5_main.shape[0]
def _check_available_mem(self):
"""
Check that there is enough memory to perform the SVD decomposition.
......