Commit d4bb3f05 authored by Unknown

Categorize publication list by instrument

parent a6cc60df
 {
   "cells": [
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "%matplotlib inline"
       ]
@@ -26,12 +26,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "from __future__ import division, print_function, absolute_import, unicode_literals\n\n# The package for accessing files in directories, etc.:\nimport os\n\n# Warning package in case something goes wrong\nfrom warnings import warn\n\n# Package for downloading online files:\ntry:\n # This package is not part of anaconda and may need to be installed.\n import wget\nexcept ImportError:\n warn('wget not found. Will install with pip.')\n import pip\n pip.main(['install', 'wget'])\n import wget\n\n# The mathematical computation package:\nimport numpy as np\nfrom numpy import exp, abs, sqrt, sum, real, imag, arctan2, append\n\n# The package used for creating and manipulating HDF5 files:\nimport h5py\n\n# Packages for plotting:\nimport matplotlib.pyplot as plt\n\n# Finally import pycroscopy for certain scientific analysis:\ntry:\n import pycroscopy as px\nexcept ImportError:\n warn('pycroscopy not found. Will install with pip.')\n import pip\n pip.main(['install', 'pycroscopy'])\n import pycroscopy as px\n\n\nfield_names = ['Amplitude [V]', 'Frequency [Hz]', 'Quality Factor', 'Phase [rad]']\nsho32 = np.dtype({'names': field_names,\n 'formats': [np.float32 for name in field_names]})"
       ]
@@ -44,12 +44,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "class ShoGuess(px.Process):\n\n def __init__(self, h5_main, cores=None):\n \"\"\"\n Validate the inputs and set some parameters\n\n Parameters\n ----------\n h5_main - dataset to compute on\n cores - Number of CPU cores to use for computation - Optional\n \"\"\"\n super(ShoGuess, self).__init__(h5_main, cores)\n\n # find the frequency vector\n h5_spec_vals = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Values')[-1]\n self.freq_vec = np.squeeze(h5_spec_vals.value) * 1E-3\n\n def _create_results_datasets(self):\n \"\"\"\n Creates the datasets and datagroups necessary to store the results.\n Just as the raw data is stored in the pycroscopy format, the results also need to conform to the same\n standards. Hence, the create_datasets function can appear to be a little longer than one might expect.\n \"\"\"\n h5_spec_inds = px.hdf_utils.getAuxData(self.h5_main, auxDataName=['Spectroscopic_Indices'])[0]\n h5_spec_vals = px.hdf_utils.getAuxData(self.h5_main, auxDataName=['Spectroscopic_Values'])[0]\n\n self.step_start_inds = np.where(h5_spec_inds[0] == 0)[0]\n self.num_udvs_steps = len(self.step_start_inds)\n \n ds_guess = px.MicroDataset('Guess', data=[],\n maxshape=(self.h5_main.shape[0], self.num_udvs_steps),\n chunking=(1, self.num_udvs_steps), dtype=sho32)\n\n not_freq = px.hdf_utils.get_attr(h5_spec_inds, 'labels') != 'Frequency'\n\n ds_sho_inds, ds_sho_vals = px.hdf_utils.buildReducedSpec(h5_spec_inds, h5_spec_vals, not_freq,\n self.step_start_inds)\n\n dset_name = self.h5_main.name.split('/')[-1]\n sho_grp = px.MicroDataGroup('-'.join([dset_name, 'SHO_Fit_']), self.h5_main.parent.name[1:])\n sho_grp.addChildren([ds_guess, ds_sho_inds, ds_sho_vals])\n sho_grp.attrs['SHO_guess_method'] = \"pycroscopy BESHO\"\n\n h5_sho_grp_refs = self.hdf.writeData(sho_grp)\n\n self.h5_guess = px.hdf_utils.getH5DsetRefs(['Guess'], h5_sho_grp_refs)[0]\n self.h5_results_grp = self.h5_guess.parent\n h5_sho_inds = px.hdf_utils.getH5DsetRefs(['Spectroscopic_Indices'],\n h5_sho_grp_refs)[0]\n h5_sho_vals = px.hdf_utils.getH5DsetRefs(['Spectroscopic_Values'],\n h5_sho_grp_refs)[0]\n\n # Reference linking before actual fitting\n px.hdf_utils.linkRefs(self.h5_guess, [h5_sho_inds, h5_sho_vals])\n # Linking ancillary position datasets:\n aux_dsets = px.hdf_utils.getAuxData(self.h5_main, auxDataName=['Position_Indices', 'Position_Values'])\n px.hdf_utils.linkRefs(self.h5_guess, aux_dsets)\n print('Finished creating datasets')\n\n def compute(self, *args, **kwargs):\n \"\"\"\n Apply the unit_function to the entire dataset. Here, we simply extend the existing compute function and only\n pass the parameters for the unit function. In this case, the only parameter is the frequency vector.\n\n Parameters\n ----------\n args\n kwargs\n\n Returns\n -------\n\n \"\"\"\n return super(ShoGuess, self).compute(w_vec=self.freq_vec)\n\n def _write_results_chunk(self):\n \"\"\"\n Write the computed results back to the H5 file\n \"\"\"\n # converting from a list to a 2D numpy array\n self._results = np.array(self._results, dtype=np.float32)\n self.h5_guess[:, 0] = px.io_utils.realToCompound(self._results, sho32)\n\n # Now update the start position\n self._start_pos = self._end_pos\n # this should stop the computation.\n\n @staticmethod\n def _unit_function():\n\n return px.be_sho.SHOestimateGuess"
       ]
@@ -62,23 +62,23 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "# download the raw data file from GitHub:\nh5_path = 'temp.h5'\nurl = 'https://raw.githubusercontent.com/pycroscopy/pycroscopy/master/data/BELine_0004.h5'\nif os.path.exists(h5_path):\n os.remove(h5_path)\n_ = wget.download(url, h5_path, bar=None)"
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "# Open the file in read/write mode\nh5_file = h5py.File(h5_path, mode='r+')\n\n# Get handles to the raw data along with other datasets and datagroups that contain necessary parameters\nh5_meas_grp = h5_file['Measurement_000']\nnum_rows = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_rows')\nnum_cols = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_cols')\n\n# Getting a reference to the main dataset:\nh5_main = h5_meas_grp['Channel_000/Raw_Data']\n\n# Extracting the X axis - vector of frequencies\nh5_spec_vals = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Values')[-1]\nfreq_vec = np.squeeze(h5_spec_vals.value) * 1E-3"
       ]
@@ -91,12 +91,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "fitter = ShoGuess(h5_main, cores=1)\nh5_results_grp = fitter.compute()\nh5_guess = h5_results_grp['Guess']\n\nrow_ind, col_ind = 103, 19\npix_ind = col_ind + row_ind * num_cols\nresp_vec = h5_main[pix_ind]\nnorm_guess_parms = h5_guess[pix_ind]\n\n# Converting from compound to real:\nnorm_guess_parms = px.io_utils.compound_to_scalar(norm_guess_parms)\nprint('Functional fit returned:', norm_guess_parms)\nnorm_resp = px.be_sho.SHOfunc(norm_guess_parms, freq_vec)"
       ]
@@ -109,12 +109,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "fig, axes = plt.subplots(nrows=2, sharex=True, figsize=(5, 10))\nfor axis, func, title in zip(axes.flat, [np.abs, np.angle], ['Amplitude (a.u.)', 'Phase (rad)']):\n axis.scatter(freq_vec, func(resp_vec), c='red', label='Measured')\n axis.plot(freq_vec, func(norm_resp), 'black', lw=3, label='Guess')\n axis.set_title(title, fontsize=16)\n axis.legend(fontsize=14)\n\naxes[1].set_xlabel('Frequency (kHz)', fontsize=14)\naxes[0].set_ylim([0, np.max(np.abs(resp_vec)) * 1.1])\naxes[1].set_ylim([-np.pi, np.pi])"
       ]
@@ -127,36 +127,36 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "h5_file.close()\nos.remove(h5_path)"
       ]
     }
   ],
-  "nbformat": 4,
-  "nbformat_minor": 0,
   "metadata": {
+    "kernelspec": {
+      "display_name": "Python 3",
+      "language": "python",
+      "name": "python3"
+    },
     "language_info": {
       "codemirror_mode": {
-        "version": 3,
-        "name": "ipython"
+        "name": "ipython",
+        "version": 3
       },
-      "nbconvert_exporter": "python",
       "file_extension": ".py",
-      "version": "3.5.2",
-      "pygments_lexer": "ipython3",
+      "mimetype": "text/x-python",
       "name": "python",
-      "mimetype": "text/x-python"
-    },
-    "kernelspec": {
-      "display_name": "Python 3",
-      "language": "python",
-      "name": "python3"
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.6.3"
     }
-  }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 0
 }
\ No newline at end of file
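
Note: every hunk in the notebook above is a pure key reordering with no change to cell contents; this is what a newer Jupyter/nbformat writer produces, since it serializes JSON keys alphabetically (the recorded kernel version also moves from 3.5.2 to 3.6.3). A minimal sketch of why a plain re-save yields exactly this diff, using only the standard library:

    import json

    # A cell keyed the way the old writer emitted it (execution_count first,
    # outputs before metadata), matching the left side of the hunks above.
    old_cell = {
        "execution_count": None,
        "cell_type": "code",
        "outputs": [],
        "metadata": {"collapsed": False},
        "source": ["%matplotlib inline"],
    }

    # Re-serializing with sorted keys reproduces the new ordering:
    # cell_type, execution_count, metadata, outputs, source.
    print(json.dumps(old_cell, indent=1, sort_keys=True))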
-f67a05d901ee5392a9f9a2f90a103dae
\ No newline at end of file
+b11b6c6272c5cb4b8cb9e52cfb61aeec
\ No newline at end of file
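
The 32-character hex values above are an MD5 checksum sidecar for the regenerated notebook; only the digest changes. A hypothetical helper showing how such a sidecar could be recomputed and verified (the function name and file names are assumptions, not part of this commit):

    import hashlib

    def file_md5(path):
        # Stream the file in chunks so large notebooks are not read into memory at once.
        digest = hashlib.md5()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(8192), b''):
                digest.update(chunk)
        return digest.hexdigest()

    # e.g. compare against the stored sidecar (hypothetical file names):
    # assert file_md5('notebook.ipynb') == open('notebook.ipynb.md5').read().strip()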
@@ -345,7 +345,7 @@ Plot the Amplitude and Phase of the gaussian versus the raw data.
-**Total running time of the script:** ( 0 minutes 57.835 seconds)
+**Total running time of the script:** ( 0 minutes 43.870 seconds)
...
@@ -201,13 +201,13 @@ User Tutorials
 .. container:: sphx-glr-download
-    :download:`Download all examples in Python source code: auto_examples_python.zip <//home/challtdow/workspace/pycroscopy/docs/auto_examples/auto_examples_python.zip>`
+    :download:`Download all examples in Python source code: auto_examples_python.zip </C:/Users/cq6/git/Pycroscopy/pycroscopy/docs/auto_examples/auto_examples_python.zip>`
 .. container:: sphx-glr-download
-    :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip <//home/challtdow/workspace/pycroscopy/docs/auto_examples/auto_examples_jupyter.zip>`
+    :download:`Download all examples in Jupyter notebooks: auto_examples_jupyter.zip </C:/Users/cq6/git/Pycroscopy/pycroscopy/docs/auto_examples/auto_examples_jupyter.zip>`
 .. only:: html
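
The only change in this hunk is the absolute build path baked into the :download: targets: the docs were rebuilt on Windows, so the Linux home-directory prefix is replaced by a Windows checkout; the zip files themselves are untouched. A sketch of why a target relative to the docs root would avoid this churn (the paths are copied from the hunk; the normalization itself is an assumption, not something this commit does):

    import os

    # Absolute target as emitted into the RST above by the Windows rebuild.
    docs_root = '/C:/Users/cq6/git/Pycroscopy/pycroscopy/docs'
    target = docs_root + '/auto_examples/auto_examples_python.zip'

    # Relative to the docs root, the link no longer encodes the build machine.
    print(os.path.relpath(target, docs_root))  # -> auto_examples/auto_examples_python.zip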
...
 {
   "cells": [
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "%matplotlib inline"
       ]
@@ -19,12 +19,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "# Code source: Chris Smith -- cq6@ornl.gov\n# License: MIT\n\nimport os\nimport numpy as np\nimport pycroscopy as px"
       ]
@@ -37,12 +37,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "# First create some data\ndata1 = np.random.rand(5, 7)"
       ]
@@ -55,12 +55,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "ds_main = px.MicroDataset('Main_Data', data=data1, parent='/')"
       ]
@@ -73,12 +73,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "ds_empty = px.MicroDataset('Empty_Data', data=[], dtype=np.float32, maxshape=[7, 5, 3])"
       ]
@@ -91,12 +91,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "data_group = px.MicroDataGroup('Data_Group', parent='/')\n\nroot_group = px.MicroDataGroup('/')\n\n# After creating the group, we then add an existing object as its child.\ndata_group.addChildren([ds_empty])\nroot_group.addChildren([ds_main, data_group])"
       ]
@@ -109,12 +109,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "root_group.showTree()"
       ]
@@ -127,12 +12,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "# First we specify the path to the file\nh5_path = 'microdata_test.h5'\n\n# Then we use the ioHDF5 class to build the file from our objects.\nhdf = px.ioHDF5(h5_path)"
       ]
@@ -145,12 +145,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "h5_refs = hdf.writeData(root_group, print_log=True)\n\n# We can use these references to get the h5py dataset and group objects\nh5_main = px.io.hdf_utils.getH5DsetRefs(['Main_Data'], h5_refs)[0]\nh5_empty = px.io.hdf_utils.getH5DsetRefs(['Empty_Data'], h5_refs)[0]"
       ]
@@ -163,12 +163,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "print(np.allclose(h5_main[()], data1))"
       ]
@@ -181,12 +181,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "data2 = np.random.rand(*h5_empty.shape)\nh5_empty[:] = data2[:]"
       ]
@@ -199,12 +199,12 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "h5_file = hdf.file\nh5_file.flush()"
       ]
@@ -217,36 +217,36 @@
       ]
     },
     {
-      "execution_count": null,
       "cell_type": "code",
-      "outputs": [],
+      "execution_count": null,
       "metadata": {
         "collapsed": false
       },
+      "outputs": [],
       "source": [
        "h5_file.close()\nos.remove(h5_path)"
       ]
     }
   ],
-  "nbformat": 4,
-  "nbformat_minor": 0,
   "metadata": {
+    "kernelspec": {
+      "display_name": "Python 3",
+      "language": "python",
+      "name": "python3"
+    },
     "language_info": {
       "codemirror_mode": {
-        "version": 3,
-        "name": "ipython"
+        "name": "ipython",
+        "version": 3
       },