Commit 23660367 authored by Somnath, Suhas

Renamed MicroData to VirtualData and so on.

parent 57890b93
@@ -69,19 +69,19 @@ data1 = np.random.rand(5, 7)
 # Now use the array to build the dataset. This dataset will live
 # directly under the root of the file. The MicroDataset class also implements the
 # compression and chunking parameters from h5py.Dataset.
-ds_main = px.MicroDataset('Main_Data', data=data1, parent='/')
+ds_main = px.VirtualDataset('Main_Data', data=data1, parent='/')
 ##############################################################################
 # We can also create an empty dataset and write the values in later.
 # With this method, it is necessary to specify the dtype and maxshape kwarg parameters.
-ds_empty = px.MicroDataset('Empty_Data', data=[], dtype=np.float32, maxshape=[7, 5, 3])
+ds_empty = px.VirtualDataset('Empty_Data', data=[], dtype=np.float32, maxshape=[7, 5, 3])
 ##############################################################################
 # We can also create groups and add other MicroData objects as children.
 # If the group's parent is not given, it will be set to root.
-data_group = px.MicroDataGroup('Data_Group', parent='/')
+data_group = px.VirtualGroup('Data_Group', parent='/')
-root_group = px.MicroDataGroup('/')
+root_group = px.VirtualGroup('/')
 # After creating the group, we then add an existing object as its child.
 data_group.add_children([ds_empty])
......
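Taken together, the hunk above amounts to the following minimal sketch of the renamed API. The VirtualDataset / VirtualGroup calls are lifted from the diff; the HDFwriter construction and write() call are assumed usage based on the writer changes later in this commit, not code shown in this hunk.

    import numpy as np
    import pycroscopy as px

    data1 = np.random.rand(5, 7)

    # Dataset backed by an in-memory array, placed under the file root
    ds_main = px.VirtualDataset('Main_Data', data=data1, parent='/')

    # Empty dataset to be filled later; dtype and maxshape are then mandatory
    ds_empty = px.VirtualDataset('Empty_Data', data=[], dtype=np.float32,
                                 maxshape=[7, 5, 3])

    # Group under the file root, holding the empty dataset as a child
    data_group = px.VirtualGroup('Data_Group', parent='/')
    data_group.add_children([ds_empty])

    # Flush the virtual tree into an actual HDF5 file (assumed writer usage)
    writer = px.HDFwriter('virtual_data_example.h5')
    writer.write(ds_main)
    writer.write(data_group)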
@@ -302,11 +302,11 @@ print('Labels', labels_mat.shape)
 # Remember that it is important to either inherit or add the `quantity` and `units` attributes to each **main** dataset
 # The two main datasets
-ds_label_mat = px.MicroDataset('Labels', labels_mat, dtype=np.uint32)
+ds_label_mat = px.VirtualDataset('Labels', labels_mat, dtype=np.uint32)
 # Adding the mandatory attributes
 ds_label_mat.attrs = {'quantity': 'Cluster ID', 'units': 'a. u.'}
-ds_cluster_centroids = px.MicroDataset('Mean_Response', centroids, dtype=h5_main.dtype)
+ds_cluster_centroids = px.VirtualDataset('Mean_Response', centroids, dtype=h5_main.dtype)
 # Inheriting / copying the mandatory attributes
 px.hdf_utils.copy_main_attributes(h5_main, ds_cluster_centroids)
@@ -339,8 +339,8 @@ operation_name = 'Cluster'
 subtree_root_path = h5_main.parent.name[1:]
-cluster_grp = px.MicroDataGroup(source_dset_name + '-' + operation_name + '_',
-                                subtree_root_path)
+cluster_grp = px.VirtualGroup(source_dset_name + '-' + operation_name + '_',
+                              subtree_root_path)
 print('New group to be created with name:', cluster_grp.name)
 print('This group (subtree) will be appended to the H5 file under the group:', subtree_root_path)
......
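The cluster hunks leave the final assembly of the results subtree implicit; a hedged sketch of how the pieces shown above fit together follows. The children list is an assumption based on the two datasets defined in this hunk, and the trailing underscore marks an indexed group (HDFwriter appends a numeric suffix, per the indexed handling later in this commit).

    source_dset_name = h5_main.name.split('/')[-1]
    operation_name = 'Cluster'
    subtree_root_path = h5_main.parent.name[1:]

    # '_' suffix => indexed group, written as <source>-Cluster_000, _001, ...
    cluster_grp = px.VirtualGroup(source_dset_name + '-' + operation_name + '_',
                                  subtree_root_path)
    cluster_grp.add_children([ds_label_mat, ds_cluster_centroids])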
@@ -128,9 +128,9 @@ class ShoGuess(px.Process):
         self.step_start_inds = np.where(h5_spec_inds[0] == 0)[0]
         self.num_udvs_steps = len(self.step_start_inds)
-        ds_guess = px.MicroDataset('Guess', data=[],
-                                   maxshape=(self.h5_main.shape[0], self.num_udvs_steps),
-                                   chunking=(1, self.num_udvs_steps), dtype=sho32)
+        ds_guess = px.VirtualDataset('Guess', data=[],
+                                     maxshape=(self.h5_main.shape[0], self.num_udvs_steps),
+                                     chunking=(1, self.num_udvs_steps), dtype=sho32)
         not_freq = px.hdf_utils.get_attr(h5_spec_inds, 'labels') != 'Frequency'
@@ -138,7 +138,7 @@ class ShoGuess(px.Process):
                                                  self.step_start_inds)
         dset_name = self.h5_main.name.split('/')[-1]
-        sho_grp = px.MicroDataGroup('-'.join([dset_name, 'SHO_Fit_']), self.h5_main.parent.name[1:])
+        sho_grp = px.VirtualGroup('-'.join([dset_name, 'SHO_Fit_']), self.h5_main.parent.name[1:])
         sho_grp.add_children([ds_guess, ds_sho_inds, ds_sho_vals])
         sho_grp.attrs['SHO_guess_method'] = "pycroscopy BESHO"
......
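The maxshape / chunking keywords above mirror h5py. For reference, a VirtualDataset declared like ds_guess resolves to roughly the following direct h5py call — a sketch with illustrative shapes, using a plain float dtype in place of the compound sho32 defined elsewhere in the example.

    import h5py
    import numpy as np

    with h5py.File('example.h5', 'a') as h5_file:
        # VirtualDataset's maxshape= and chunking= map onto h5py's
        # maxshape= and chunks= arguments of create_dataset
        h5_guess = h5_file.create_dataset('Guess',
                                          shape=(1024, 16),     # illustrative
                                          maxshape=(1024, 16),
                                          chunks=(1, 16),
                                          dtype=np.float32)     # stands in for sho32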
@@ -75,19 +75,19 @@ data1 = np.random.rand(5, 7)
 # Now use the array to build the dataset. This dataset will live
 # directly under the root of the file. The MicroDataset class also implements the
 # compression and chunking parameters from h5py.Dataset.
-ds_main = px.MicroDataset('Main_Data', data=data1, parent='/')
+ds_main = px.VirtualDataset('Main_Data', data=data1, parent='/')
 ##############################################################################
 # We can also create an empty dataset and write the values in later.
 # With this method, it is necessary to specify the dtype and maxshape kwarg parameters.
-ds_empty = px.MicroDataset('Empty_Data', data=[], dtype=np.float32, maxshape=[7, 5, 3])
+ds_empty = px.VirtualDataset('Empty_Data', data=[], dtype=np.float32, maxshape=[7, 5, 3])
 ##############################################################################
 # We can also create groups and add other MicroData objects as children.
 # If the group's parent is not given, it will be set to root.
-data_group = px.MicroDataGroup('Data_Group', parent='/')
+data_group = px.VirtualGroup('Data_Group', parent='/')
-root_group = px.MicroDataGroup('/')
+root_group = px.VirtualGroup('/')
 # After creating the group, we then add an existing object as its child.
 data_group.add_children([ds_empty])
......
@@ -300,11 +300,11 @@ print('Labels', labels_mat.shape)
 # Remember that it is important to either inherit or add the `quantity` and `units` attributes to each **main** dataset
 # The two main datasets
-ds_label_mat = px.MicroDataset('Labels', labels_mat, dtype=np.uint32)
+ds_label_mat = px.VirtualDataset('Labels', labels_mat, dtype=np.uint32)
 # Adding the mandatory attributes
 ds_label_mat.attrs = {'quantity': 'Cluster ID', 'units': 'a. u.'}
-ds_cluster_centroids = px.MicroDataset('Mean_Response', centroids, dtype=h5_main.dtype)
+ds_cluster_centroids = px.VirtualDataset('Mean_Response', centroids, dtype=h5_main.dtype)
 # Inheriting / copying the mandatory attributes
 px.hdf_utils.copy_main_attributes(h5_main, ds_cluster_centroids)
@@ -337,8 +337,8 @@ operation_name = 'Cluster'
 subtree_root_path = h5_main.parent.name[1:]
-cluster_grp = px.MicroDataGroup(source_dset_name + '-' + operation_name + '_',
-                                subtree_root_path)
+cluster_grp = px.VirtualGroup(source_dset_name + '-' + operation_name + '_',
+                              subtree_root_path)
 print('New group to be created with name:', cluster_grp.name)
 print('This group (subtree) will be appended to the H5 file under the group:', subtree_root_path)
......
@@ -128,9 +128,9 @@ class ShoGuess(px.Process):
         self.step_start_inds = np.where(h5_spec_inds[0] == 0)[0]
         self.num_udvs_steps = len(self.step_start_inds)
-        ds_guess = px.MicroDataset('Guess', data=[],
-                                   maxshape=(self.h5_main.shape[0], self.num_udvs_steps),
-                                   chunking=(1, self.num_udvs_steps), dtype=sho32)
+        ds_guess = px.VirtualDataset('Guess', data=[],
+                                     maxshape=(self.h5_main.shape[0], self.num_udvs_steps),
+                                     chunking=(1, self.num_udvs_steps), dtype=sho32)
         not_freq = px.hdf_utils.get_attr(h5_spec_inds, 'labels') != 'Frequency'
@@ -138,7 +138,7 @@ class ShoGuess(px.Process):
                                                  self.step_start_inds)
         dset_name = self.h5_main.name.split('/')[-1]
-        sho_grp = px.MicroDataGroup('-'.join([dset_name, 'SHO_Fit_']), self.h5_main.parent.name[1:])
+        sho_grp = px.VirtualGroup('-'.join([dset_name, 'SHO_Fit_']), self.h5_main.parent.name[1:])
         sho_grp.add_children([ds_guess, ds_sho_inds, ds_sho_vals])
         sho_grp.attrs['SHO_guess_method'] = "pycroscopy BESHO"
......
@@ -25,7 +25,7 @@ from ..core.io.dtype_utils import compound_to_real, real_to_compound
 from ..core.io.hdf_utils import get_h5_obj_refs, get_auxillary_datasets, copy_region_refs, link_h5_objects_as_attrs, \
     get_sort_order, get_dimensionality, reshape_to_n_dims, reshape_from_n_dims, build_reduced_spec_dsets, \
     get_attr, link_h5_obj_as_alias, create_empty_dataset
-from ..core.io.microdata import MicroDataset, MicroDataGroup
+from ..core.io.virtual_data import VirtualDataset, VirtualGroup
 '''
 Custom dtypes for the datasets created during fitting.
@@ -456,11 +456,11 @@ class BELoopFitter(Fitter):
         tot_cycles = cycle_start_inds.size
         # Prepare containers for the datasets
-        ds_projected_loops = MicroDataset('Projected_Loops', data=[], dtype=np.float32,
-                                          maxshape=self.h5_main.shape, chunking=self.h5_main.chunks,
-                                          compression='gzip')
-        ds_loop_metrics = MicroDataset('Loop_Metrics', data=[], dtype=loop_metrics32,
-                                       maxshape=(self.h5_main.shape[0], tot_cycles))
+        ds_projected_loops = VirtualDataset('Projected_Loops', data=[], dtype=np.float32,
+                                            maxshape=self.h5_main.shape, chunking=self.h5_main.chunks,
+                                            compression='gzip')
+        ds_loop_metrics = VirtualDataset('Loop_Metrics', data=[], dtype=loop_metrics32,
+                                         maxshape=(self.h5_main.shape[0], tot_cycles))
         ds_loop_met_spec_inds, ds_loop_met_spec_vals = build_reduced_spec_dsets(self._sho_spec_inds, self._sho_spec_vals,
                                                                                 not_fit_dim, cycle_start_inds,
@@ -469,7 +469,7 @@ class BELoopFitter(Fitter):
         # name of the dataset being projected.
         dset_name = self.h5_main.name.split('/')[-1]
-        proj_grp = MicroDataGroup('-'.join([dset_name, 'Loop_Fit_']),
+        proj_grp = VirtualGroup('-'.join([dset_name, 'Loop_Fit_']),
                                   self.h5_main.parent.name[1:])
         proj_grp.attrs['projection_method'] = 'pycroscopy BE loop model'
         proj_grp.add_children([ds_projected_loops, ds_loop_metrics,
......
@@ -11,7 +11,7 @@ from .fitter import Fitter
 from ..core.io.pycro_data import PycroDataset
 from ..core.io.hdf_utils import build_reduced_spec_dsets, copy_region_refs, link_h5_objects_as_attrs, get_h5_obj_refs, \
     create_empty_dataset, get_auxillary_datasets
-from ..core.io.microdata import MicroDataset, MicroDataGroup
+from ..core.io.virtual_data import VirtualDataset, VirtualGroup
 '''
 Custom dtype for the datasets created during fitting.
@@ -67,9 +67,9 @@ class BESHOfitter(Fitter):
         links the guess dataset to the spectroscopic datasets.
         """
         # Create all the ancillary datasets, allocate space.....
-        ds_guess = MicroDataset('Guess', data=[],
-                                maxshape=(self.h5_main.shape[0], self.num_udvs_steps),
-                                chunking=(1, self.num_udvs_steps), dtype=sho32)
+        ds_guess = VirtualDataset('Guess', data=[],
+                                  maxshape=(self.h5_main.shape[0], self.num_udvs_steps),
+                                  chunking=(1, self.num_udvs_steps), dtype=sho32)
         ds_guess.attrs = self._parms_dict
         not_freq = np.array(self.h5_main.spec_dim_labels) != 'Frequency'
@@ -79,7 +79,7 @@ class BESHOfitter(Fitter):
                                                            not_freq, self.step_start_inds)
         dset_name = self.h5_main.name.split('/')[-1]
-        sho_grp = MicroDataGroup('-'.join([dset_name,
+        sho_grp = VirtualGroup('-'.join([dset_name,
                                            'SHO_Fit_']),
                                  self.h5_main.parent.name[1:])
         sho_grp.add_children([ds_guess,
......
@@ -10,7 +10,7 @@ from __future__ import division, print_function, absolute_import, unicode_literals
 import numpy as np
 from ..core.processing.process import Process, parallel_compute
-from ..core.io.microdata import MicroDataset, MicroDataGroup
+from ..core.io.virtual_data import VirtualDataset, VirtualGroup
 from ..core.io.dtype_utils import real_to_compound
 from ..core.io.hdf_utils import get_h5_obj_refs, get_auxillary_datasets, copy_attributes, link_as_main
 from ..core.io.write_utils import build_ind_val_dsets
@@ -116,8 +116,8 @@ class GIVBayesian(Process):
                                                    labels=['Bias'], units=['V'], verbose=self.verbose)
         cap_shape = (num_pos, 1)
-        ds_cap = MicroDataset('Capacitance', data=[], maxshape=cap_shape, dtype=cap_dtype, chunking=cap_shape,
-                              compression='gzip')
+        ds_cap = VirtualDataset('Capacitance', data=[], maxshape=cap_shape, dtype=cap_dtype, chunking=cap_shape,
+                                compression='gzip')
         ds_cap.attrs = {'quantity': 'Capacitance', 'units': 'pF'}
         ds_cap_spec_inds, ds_cap_spec_vals = build_ind_val_dsets([1], is_spectral=True,
                                                                  labels=['Direction'], units=[''], verbose=self.verbose)
@@ -125,19 +125,19 @@ class GIVBayesian(Process):
         ds_cap_spec_inds.name = 'Spectroscopic_Indices_Cap'
         ds_cap_spec_vals.name = 'Spectroscopic_Values_Cap'
-        ds_r_var = MicroDataset('R_variance', data=[], maxshape=(num_pos, self.num_x_steps), dtype=np.float32,
-                                chunking=(1, self.num_x_steps), compression='gzip')
+        ds_r_var = VirtualDataset('R_variance', data=[], maxshape=(num_pos, self.num_x_steps), dtype=np.float32,
+                                  chunking=(1, self.num_x_steps), compression='gzip')
         ds_r_var.attrs = {'quantity': 'Resistance', 'units': 'GOhms'}
-        ds_res = MicroDataset('Resistance', data=[], maxshape=(num_pos, self.num_x_steps), dtype=np.float32,
-                              chunking=(1, self.num_x_steps), compression='gzip')
+        ds_res = VirtualDataset('Resistance', data=[], maxshape=(num_pos, self.num_x_steps), dtype=np.float32,
+                                chunking=(1, self.num_x_steps), compression='gzip')
         ds_res.attrs = {'quantity': 'Resistance', 'units': 'GOhms'}
-        ds_i_corr = MicroDataset('Corrected_Current', data=[], maxshape=(num_pos, self.single_ao.size),
-                                 dtype=np.float32,
-                                 chunking=(1, self.single_ao.size), compression='gzip')
+        ds_i_corr = VirtualDataset('Corrected_Current', data=[], maxshape=(num_pos, self.single_ao.size),
+                                   dtype=np.float32,
+                                   chunking=(1, self.single_ao.size), compression='gzip')
         # don't bother adding any other attributes, all this will be taken from h5_main
-        bayes_grp = MicroDataGroup(self.h5_main.name.split('/')[-1] + '-' + self.process_name + '_',
-                                   parent=self.h5_main.parent.name)
+        bayes_grp = VirtualGroup(self.h5_main.name.split('/')[-1] + '-' + self.process_name + '_',
+                                 parent=self.h5_main.parent.name)
         bayes_grp.add_children([ds_spec_inds, ds_spec_vals, ds_cap, ds_r_var, ds_res, ds_i_corr,
                                 ds_cap_spec_inds, ds_cap_spec_vals])
         bayes_grp.attrs = {'algorithm_author': 'Kody J. Law', 'last_pixel': 0}
......
@@ -19,7 +19,7 @@ import matplotlib.patches as patches
 from ...core.io.io_utils import recommend_cpu_cores
 from ...core.io.dtype_utils import real_to_compound
-from ...core.io.microdata import MicroDataset, MicroDataGroup
+from ...core.io.virtual_data import VirtualDataset, VirtualGroup
 from ...core.io.hdf_writer import HDFwriter
 # atom_dtype = np.dtype([('x', np.float32),
@@ -327,9 +327,9 @@ def fit_atom_positions_dset(h5_grp, fitting_parms=None, num_cores=None):
         guess_parms[atom_ind, :num_neighbors_used] = np.squeeze(real_to_compound(guess_coeff, guess_parms.dtype))
         fit_parms[atom_ind, :num_neighbors_used] = np.squeeze(real_to_compound(fit_coeff, guess_parms.dtype))
-    ds_atom_guesses = MicroDataset('Guess', data=guess_parms)
-    ds_atom_fits = MicroDataset('Fit', data=fit_parms)
-    dgrp_atom_finding = MicroDataGroup(h5_grp.name.split('/')[-1], parent=h5_grp.parent.name)
+    ds_atom_guesses = VirtualDataset('Guess', data=guess_parms)
+    ds_atom_fits = VirtualDataset('Fit', data=fit_parms)
+    dgrp_atom_finding = VirtualGroup(h5_grp.name.split('/')[-1], parent=h5_grp.parent.name)
     dgrp_atom_finding.attrs = fitting_parms
     dgrp_atom_finding.add_children([ds_atom_guesses, ds_atom_fits])
......
@@ -12,7 +12,7 @@ import time as tm
 import matplotlib.pyplot as plt
 from ...core.io.io_utils import recommend_cpu_cores
-from ...core.io.microdata import MicroDataset, MicroDataGroup
+from ...core.io.virtual_data import VirtualDataset, VirtualGroup
 from ...core.io.hdf_writer import HDFwriter
 from ...core.viz.plot_utils import cmap_jet_white_center
@@ -500,13 +500,13 @@ class Gauss_Fit(object):
             Nearest_Neighbor_Indices
         """
-        ds_atom_guesses = MicroDataset('Gaussian_Guesses', data=self.guess_dataset)
-        ds_atom_fits = MicroDataset('Gaussian_Fits', data=self.fit_dataset)
-        ds_motif_guesses = MicroDataset('Motif_Guesses', data=self.motif_guess_dataset)
-        ds_motif_fits = MicroDataset('Motif_Fits', data=self.motif_converged_dataset)
-        ds_nearest_neighbors = MicroDataset('Nearest_Neighbor_Indices',
-                                            data=self.closest_neighbors_mat, dtype=np.uint32)
-        dgrp_atom_finding = MicroDataGroup(self.atom_grp.name.split('/')[-1], parent=self.atom_grp.parent.name)
+        ds_atom_guesses = VirtualDataset('Gaussian_Guesses', data=self.guess_dataset)
+        ds_atom_fits = VirtualDataset('Gaussian_Fits', data=self.fit_dataset)
+        ds_motif_guesses = VirtualDataset('Motif_Guesses', data=self.motif_guess_dataset)
+        ds_motif_fits = VirtualDataset('Motif_Fits', data=self.motif_converged_dataset)
+        ds_nearest_neighbors = VirtualDataset('Nearest_Neighbor_Indices',
+                                              data=self.closest_neighbors_mat, dtype=np.uint32)
+        dgrp_atom_finding = VirtualGroup(self.atom_grp.name.split('/')[-1], parent=self.atom_grp.parent.name)
         dgrp_atom_finding.attrs = self.fitting_parms
         dgrp_atom_finding.add_children([ds_atom_guesses, ds_atom_fits, ds_motif_guesses,
                                         ds_motif_fits, ds_nearest_neighbors])
......
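Both atom-finding modules import HDFwriter alongside the virtual classes, so the trees assembled above are presumably flushed through it. A hedged sketch of that final step; the constructor argument and the shape of the return value are assumptions, not code shown in this commit.

    # Write the assembled virtual tree into the open HDF5 file
    hdf = HDFwriter(self.atom_grp.file)   # assumed: writer wraps the open h5py.File
    h5_refs = hdf.write(dgrp_atom_finding)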
 from . import hdf_writer
-from . import microdata
+from . import virtual_data
 from . import pycro_data
 from . import translator
 from . import numpy_translator
@@ -10,10 +10,10 @@ from . import dtype_utils
 from . import write_utils
 from .hdf_writer import HDFwriter
-from .microdata import *
+from .virtual_data import *
 from .pycro_data import PycroDataset
 from .translator import *
 from .numpy_translator import NumpyTranslator
-__all__ = ['HDFwriter', 'MicroDataset', 'MicroDataGroup', 'PycroDataset', 'hdf_utils', 'io_utils', 'dtype_utils',
+__all__ = ['HDFwriter', 'VirtualDataset', 'VirtualGroup', 'PycroDataset', 'hdf_utils', 'io_utils', 'dtype_utils',
            'NumpyTranslator', 'write_utils']
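For downstream code, the import surface changes accordingly. A sketch of the migration, with absolute package paths inferred from the relative imports in this commit:

    # Before this commit:
    # from pycroscopy.core.io.microdata import MicroDataset, MicroDataGroup

    # After this commit, directly from the renamed module:
    from pycroscopy.core.io.virtual_data import VirtualDataset, VirtualGroup

    # ... or via the subpackage, which re-exports the classes through
    # `from .virtual_data import *` and lists them in __all__:
    from pycroscopy.core.io import VirtualDataset, VirtualGroup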
@@ -12,7 +12,7 @@ import collections
 from warnings import warn
 from collections import Iterable
 import numpy as np
-from .microdata import MicroDataset
+from .virtual_data import VirtualDataset
 from .write_utils import make_indices_matrix
 __all__ = ['get_attr', 'get_h5_obj_refs', 'get_indices_for_region_ref', 'get_dimensionality', 'get_sort_order',
@@ -1508,9 +1508,9 @@ def build_reduced_spec_dsets(h5_spec_inds, h5_spec_vals, keep_dim, step_starts,
     Returns
     -------
-    ds_inds : MicroDataset
+    ds_inds : VirtualDataset
         Reduced Spectroscopic indices dataset
-    ds_vals : MicroDataset
+    ds_vals : VirtualDataset
         Reduced Spectroscopic values dataset
     """
     for param in [h5_spec_inds, h5_spec_vals]:
@@ -1527,11 +1527,11 @@ def build_reduced_spec_dsets(h5_spec_inds, h5_spec_vals, keep_dim, step_starts,
         ind_mat = h5_spec_inds[keep_dim, :][:, step_starts]
         val_mat = h5_spec_vals[keep_dim, :][:, step_starts]
         '''
-        Create new MicroDatasets to hold the data
+        Create new VirtualDatasets to hold the data
         Name them based on basename
         '''
-        ds_inds = MicroDataset(basename + '_Indices', ind_mat, dtype=h5_spec_inds.dtype)
-        ds_vals = MicroDataset(basename + '_Values', val_mat, dtype=h5_spec_vals.dtype)
+        ds_inds = VirtualDataset(basename + '_Indices', ind_mat, dtype=h5_spec_inds.dtype)
+        ds_vals = VirtualDataset(basename + '_Values', val_mat, dtype=h5_spec_vals.dtype)
         # Extracting the labels from the original spectroscopic data sets
         sho_inds_labs = h5_spec_inds.attrs['labels'][keep_dim]
         # Creating the dimension slices for the new spectroscopic data sets
@@ -1546,8 +1546,8 @@ def build_reduced_spec_dsets(h5_spec_inds, h5_spec_vals, keep_dim, step_starts,
         ds_vals.attrs['units'] = h5_spec_vals.attrs['units'][keep_dim]
     else:  # Single spectroscopic dimension:
-        ds_inds = MicroDataset('Spectroscopic_Indices', np.array([[0]], dtype=np.uint32))
-        ds_vals = MicroDataset('Spectroscopic_Values', np.array([[0]], dtype=np.float32))
+        ds_inds = VirtualDataset('Spectroscopic_Indices', np.array([[0]], dtype=np.uint32))
+        ds_vals = VirtualDataset('Spectroscopic_Values', np.array([[0]], dtype=np.float32))
         ds_inds.attrs['labels'] = {'Single_Step': (slice(0, None), slice(None))}
         ds_vals.attrs['labels'] = {'Single_Step': (slice(0, None), slice(None))}
......
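Per the updated docstring, build_reduced_spec_dsets now hands back VirtualDataset objects rather than MicroDatasets. A hedged usage sketch; the basename keyword is inferred from its use in the function body above, and the inputs are assumed to be open h5py datasets:

    # Collapse the spectroscopic indices/values down to the kept dimensions
    ds_inds, ds_vals = build_reduced_spec_dsets(h5_spec_inds, h5_spec_vals,
                                                keep_dim, step_starts,
                                                basename='Reduced_Spectroscopic')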
@@ -15,7 +15,7 @@ from warnings import warn
 import h5py
 import numpy as np
-from .microdata import MicroDataGroup, MicroDataset, MicroData
+from .virtual_data import VirtualGroup, VirtualDataset, VirtualData
 from ..__version__ import version
 if sys.version_info.major == 3:
@@ -178,26 +178,26 @@ class HDFwriter(object):
             h5_file.attrs['Pycroscopy version'] = version
-        # Checking if the data is a MicroDataGroup object
-        if not isinstance(data, MicroData):
+        # Checking if the data is a VirtualData object
+        if not isinstance(data, VirtualData):
             raise TypeError('Input expected to be of type MicroData but is of type: {} \n'.format(type(data)))
-        if isinstance(data, MicroDataset):
+        if isinstance(data, VirtualDataset):
             # just want to write a single dataset:
             try:
                 h5_parent = h5_file[data.parent]
             except KeyError:
-                raise KeyError('Parent ({}) of provided MicroDataset ({}) does not exist in the '
+                raise KeyError('Parent ({}) of provided VirtualDataset ({}) does not exist in the '
                                'file'.format(data.parent, data.name))
             h5_dset = HDFwriter._create_dataset(h5_parent, data, print_log=print_log)
             return [h5_dset]
-        assert isinstance(data, MicroDataGroup)  # just to avoid PEP8 warning
+        assert isinstance(data, VirtualGroup)  # just to avoid PEP8 warning
         # Populating the tree structure recursively
         ref_list = []
-        # Figuring out if the first item in MicroDataGroup tree is file or group
+        # Figuring out if the first item in VirtualGroup tree is file or group
         if data.name == '' and data.parent == '/':
             # For file we just write the attributes
             HDFwriter._write_simple_attrs(h5_file, data.attrs, obj_type='file', print_log=print_log)
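Condensed, the dispatch in this hunk reads as below — a simplified sketch, with the attribute holding the open file and the elided group recursion assumed beyond what the hunk shows.

    def write(self, data, print_log=False):
        # Only VirtualData nodes (datasets or groups) are accepted
        if not isinstance(data, VirtualData):
            raise TypeError('Input expected to be of type VirtualData but is '
                            'of type: {}'.format(type(data)))
        if isinstance(data, VirtualDataset):
            # Lone dataset: create it under its declared parent and return
            h5_parent = self.h5_file[data.parent]       # assumed attribute name
            return [HDFwriter._create_dataset(h5_parent, data,
                                              print_log=print_log)]
        # Otherwise a VirtualGroup: walk the tree of children recursively
        ref_list = []
        ...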
@@ -216,7 +216,7 @@
         Parameters
         ----------
-        child : MicroDataGroup object
+        child : VirtualGroup object
             tree to be written
         parent : h5py.Group or h5py.File object
             HDF5 object to build tree under
@@ -231,7 +231,7 @@
         h5_parent_group = h5_file[parent]
-        if isinstance(child, MicroDataGroup):
+        if isinstance(child, VirtualGroup):
             h5_obj = HDFwriter._create_group(h5_parent_group, child, print_log=print_log)
             # here we do the recursive function call
             for ch in child.children:
@@ -256,13 +256,13 @@
     @staticmethod
     def _create_group(h5_parent_group, micro_group, print_log=False):
         """
-        Creates a h5py.Group object from the provided MicroDataGroup object under h5_new_group and writes all attributes
+        Creates a h5py.Group object from the provided VirtualGroup object under h5_new_group and writes all attributes
         Parameters
         ----------
         h5_parent_group : h5py.Group object
             Parent group under which the new group object will be created
-        micro_group : MicroDataGroup object
+        micro_group : VirtualGroup object
             Definition for the new group
         print_log : bool, optional. Default=False
             Whether or not to print debugging statements
@@ -272,9 +272,9 @@
         h5_new_group : h5py.Group
             The newly created group
         """
-        if not isinstance(micro_group, MicroDataGroup):
+        if not isinstance(micro_group, VirtualGroup):
             HDFwriter.__safe_abort(h5_parent_group.file)
-            raise TypeError('micro_group should be a MicroDataGroup object but is instead of type '
+            raise TypeError('micro_group should be a VirtualGroup object but is instead of type '
                             '{}'.format(type(micro_group)))
         if not isinstance(h5_parent_group, h5py.Group):
             raise TypeError('h5_parent_group should be a h5py.Group object but is instead of type '
@@ -282,7 +282,7 @@
         if micro_group.name == '':
             HDFwriter.__safe_abort(h5_parent_group.file)
-            raise ValueError('MicroDataGroup object with empty name will not be handled by this function')
+            raise ValueError('VirtualGroup object with empty name will not be handled by this function')
         # First complete the name of the group by adding the index suffix
         if micro_group.indexed:
@@ -364,7 +364,7 @@
         ----------
         h5_group : h5py.File or h5py.Group object
             Parent under which this dataset will be created
-        microdset : MicroDataset object
+        microdset : VirtualDataset object
             Definition for the dataset
         Returns
@@ -372,9 +372,9 @@
         h5_dset : h5py.Dataset object
             Newly created dataset object
         """
-        if not isinstance(microdset, MicroDataset):
+        if not isinstance(microdset, VirtualDataset):
             HDFwriter.__safe_abort(h5_group.file)
-            raise TypeError('microdset should be a MicroDataGroup object but is instead of type '
+            raise TypeError('microdset should be a VirtualDataset object but is instead of type '
                             '{}'.format(type(microdset)))
         if not isinstance(h5_group, (h5py.Group, h5py.File)):
             raise TypeError('h5_group should be a h5py.Group or h5py.File object but is instead of type '
@@ -398,7 +398,7 @@
         ----------
         h5_group : h5py.File or h5py.Group object
             Parent under which this dataset will be created
-        microdset : MicroDataset object
+        microdset : VirtualDataset object