Commit 360a5a28 authored by Unknown

Doc updates

parent 3e895b38
-dac0d5ff5eb71d5408d037490b747ce8
\ No newline at end of file
+74929e1fd582892c288cd4019fe50cac
\ No newline at end of file
@@ -471,7 +471,7 @@ the slicing.
Out::

    ['X', 'Y', 'Frequency', 'DC_Offset', 'Field', 'Cycle']
-    [ 5  5 87 64  2  2]
+    [5, 5, 87, 64, 2, 2]
With this information, we can now get our data slice.
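A minimal standalone sketch of the slicing described here, with synthetic data standing in for the real dataset; the dimension names and sizes come from the output above, while the row-major dimension ordering is an assumption::

    import numpy as np

    # Dimension labels and sizes as printed above
    labels = ['X', 'Y', 'Frequency', 'DC_Offset', 'Field', 'Cycle']
    sizes = [5, 5, 87, 64, 2, 2]

    # Synthetic stand-in for the flattened main dataset:
    # (positions, spectral points) = (5 * 5, 87 * 64 * 2 * 2)
    raw = np.random.rand(5 * 5, 87 * 64 * 2 * 2)

    # Reshape into the full N-dimensional form, then slice out the
    # (Frequency x DC_Offset) spectrogram at x=2, y=3, first Field and Cycle
    full = raw.reshape(sizes)
    spectrogram = full[2, 3, :, :, 0, 0]
    print(spectrogram.shape)  # (87, 64)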
@@ -751,7 +751,7 @@ approach.
-**Total running time of the script:** ( 0 minutes 1.511 seconds)
+**Total running time of the script:** ( 0 minutes 5.056 seconds)
...
{
"metadata": {
"language_info": {
"pygments_lexer": "ipython3",
"mimetype": "text/x-python",
"version": "3.5.2",
"nbconvert_exporter": "python",
"file_extension": ".py",
"codemirror_mode": {
"version": 3,
"name": "ipython"
},
"name": "python"
},
"kernelspec": {
"language": "python",
"display_name": "Python 3",
"name": "python3"
}
},
"nbformat": 4,
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"%matplotlib inline"
@@ -20,10 +40,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"# Ensure python 3 compatibility:\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n\n# The package for accessing files in directories, etc.:\nimport os\n\n# Warning package in case something goes wrong\nfrom warnings import warn\n\n# Package for downloading online files:\ntry:\n # This package is not part of anaconda and may need to be installed.\n import wget\nexcept ImportError:\n warn('wget not found. Will install with pip.')\n import pip\n pip.main(['install', 'wget'])\n import wget\n\n# The mathematical computation package:\nimport numpy as np\n\n# The package used for creating and manipulating HDF5 files:\nimport h5py\n\n# Packages for plotting:\nimport matplotlib.pyplot as plt\n\n# Parallel computation library:\ntry:\n import joblib\nexcept ImportError:\n warn('joblib not found. Will install with pip.')\n import pip\n pip.main(['install', 'joblib'])\n import joblib\n\n# Timing\nimport time\n\n# Finally import pycroscopy for certain scientific analysis:\ntry:\n import pycroscopy as px\nexcept ImportError:\n warn('pycroscopy not found. Will install with pip.')\n import pip\n pip.main(['install', 'pycroscopy'])\n import pycroscopy as px"
@@ -38,10 +58,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"# download the raw data file from Github:\nh5_path = 'temp.h5'\nurl = 'https://raw.githubusercontent.com/pycroscopy/pycroscopy/master/data/BELine_0004.h5'\nif os.path.exists(h5_path):\n os.remove(h5_path)\n_ = wget.download(url, h5_path)"
@@ -49,10 +69,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"# Open the file in read-only mode\nh5_file = h5py.File(h5_path, mode='r')\n\n# Get handles to the the raw data along with other datasets and datagroups that contain necessary parameters\nh5_meas_grp = h5_file['Measurement_000']\n\n# Getting a reference to the main dataset:\nh5_main = h5_meas_grp['Channel_000/Raw_Data']\nprint('\\nThe main dataset:\\n------------------------------------')\nprint(h5_main)\n\nnum_rows = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_rows')\nnum_cols = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_cols')\n\n# Extracting the X axis - vector of frequencies\nh5_spec_vals = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Values')[-1]\nfreq_vec = np.squeeze(h5_spec_vals.value) * 1E-3"
@@ -67,10 +87,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"px.viz.be_viz_utils.jupyter_visualize_be_spectrograms(h5_main)"
@@ -92,10 +112,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"row_ind, col_ind = 103, 19\nresp_vec = h5_main[col_ind + row_ind*num_cols]\nnorm_guess_parms = px.analysis.be_sho.SHOestimateGuess(resp_vec, freq_vec)\nprint('Functional fit returned:', norm_guess_parms)\nnorm_resp = px.analysis.be_sho.SHOfunc(norm_guess_parms, freq_vec)\n\n\nfig, axes = plt.subplots(ncols=2, figsize=(10, 5))\nfor axis, func, title in zip(axes.flat, [np.abs, np.angle], ['Amplitude (a.u.)', 'Phase (rad)']):\n axis.scatter(freq_vec, func(resp_vec), c='red', label='Measured')\n axis.plot(freq_vec, func(norm_resp), 'black', lw=3, label='Guess')\n axis.set_title(title, fontsize=16)\n axis.legend(fontsize=14)\n axis.set_xlabel('Frequency (kHz)', fontsize=14)\n\naxes[0].set_ylim([0, np.max(np.abs(resp_vec))*1.1])\naxes[1].set_ylim([-np.pi, np.pi])"
@@ -110,10 +130,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"raw_data = h5_main[()]\n\nserial_results = np.zeros((raw_data.shape[0], 4), dtype=np.float)"
@@ -128,10 +148,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"t_0 = time.time()\nfor pix_ind in range(raw_data.shape[0]):\n serial_results[pix_ind] = px.analysis.be_sho.SHOestimateGuess(raw_data[pix_ind], freq_vec)\nprint('Serial computation took', np.round(time.time()-t_0, 2), ' seconds')"
@@ -146,10 +166,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"func = px.analysis.be_sho.SHOestimateGuess\ncores = 4\nargs = freq_vec\n\nt_0 = time.time()\nvalues = [joblib.delayed(func)(x, args) for x in raw_data]\nparallel_results = joblib.Parallel(n_jobs=cores)(values)\nprint('Parallel computation took', np.round(time.time()-t_0, 2), ' seconds')"
@@ -164,10 +184,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"row_ind, col_ind = 103, 19\npix_ind = col_ind + row_ind * num_cols\nprint('Parallel and serial computation results matching:',\n np.all(np.isclose(serial_results[pix_ind], parallel_results[pix_ind])))"
@@ -189,35 +209,15 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"h5_file.close()\nos.remove(h5_path)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
\ No newline at end of file
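For reference, the parallel-computation pattern exercised by the notebook above reduces to the following self-contained sketch; the per-pixel function here is a trivial stand-in for px.analysis.be_sho.SHOestimateGuess, not pycroscopy's implementation::

    import numpy as np
    import joblib

    def unit_function(vector, freq_vec):
        # Stand-in for SHOestimateGuess: any per-pixel computation works
        return np.array([vector.max(), freq_vec[vector.argmax()], 1.0, 0.0])

    raw_data = np.random.rand(100, 16)    # 100 "pixels", 16 spectral points each
    freq_vec = np.linspace(300, 350, 16)  # frequency vector (kHz)

    # Serial reference loop, as in the notebook
    serial = np.array([unit_function(pix, freq_vec) for pix in raw_data])

    # joblib version: wrap the calls with delayed(), then dispatch to workers
    tasks = [joblib.delayed(unit_function)(pix, freq_vec) for pix in raw_data]
    parallel = np.array(joblib.Parallel(n_jobs=2)(tasks))

    print('Parallel and serial results match:', np.allclose(serial, parallel))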
-10fb70dac1b5685df990219035b9aff0
\ No newline at end of file
+277f27d3fc65f295a06f7c08e0230482
\ No newline at end of file
{
"metadata": {
"language_info": {
"pygments_lexer": "ipython3",
"mimetype": "text/x-python",
"version": "3.5.2",
"nbconvert_exporter": "python",
"file_extension": ".py",
"codemirror_mode": {
"version": 3,
"name": "ipython"
},
"name": "python"
},
"kernelspec": {
"language": "python",
"display_name": "Python 3",
"name": "python3"
}
},
"nbformat": 4,
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"%matplotlib inline"
@@ -15,7 +35,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"\n=================================================================\nTutorial 5: Formalizing Data Processing\n=================================================================\n\n**Suhas Somnath**\n\n9/8/2017\n\n\nThis set of tutorials will serve as examples for developing end-to-end workflows for and using pycroscopy.\n\n**In this example, we will learn how to write a simple yet formal pycroscopy class for processing data.**\n\nIntroduction\n============\n\nData processing / analysis typically involves a few basic tasks:\n1. Reading data from file\n2. Computation\n3. Writing results to disk\n\nThis example is based on the parallel computing example where we fit a dataset containing spectra at each location to a\nfunction. While the previous example focused on comparing serial and parallel computing, we will focus on the framework\nthat needs to be built around a computation for robust data processing. As the example will show below, the framework\nessentially deals with careful file reading and writing.\n\nThe majority of the code for this example is based on the BESHOModel Class under pycroscopy.analysis\n\n"
"\n=======================================\nTutorial 5: Formalizing Data Processing\n=======================================\n\n**Suhas Somnath**\n\n9/8/2017\n\n\nThis set of tutorials will serve as examples for developing end-to-end workflows for and using pycroscopy.\n\n**In this example, we will learn how to write a simple yet formal pycroscopy class for processing data.**\n\nIntroduction\n============\n\nData processing / analysis typically involves a few basic tasks:\n1. Reading data from file\n2. Computation\n3. Writing results to disk\n\nThis example is based on the parallel computing example where we fit a dataset containing spectra at each location to a\nfunction. While the previous example focused on comparing serial and parallel computing, we will focus on the framework\nthat needs to be built around a computation for robust data processing. As the example will show below, the framework\nessentially deals with careful file reading and writing.\n\nThe majority of the code for this example is based on the BESHOModel Class under pycroscopy.analysis\n\n"
]
},
{
@@ -27,10 +47,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"from __future__ import division, print_function, absolute_import, unicode_literals\n\n# The package for accessing files in directories, etc.:\nimport os\n\n# Warning package in case something goes wrong\nfrom warnings import warn\n\n# Package for downloading online files:\ntry:\n # This package is not part of anaconda and may need to be installed.\n import wget\nexcept ImportError:\n warn('wget not found. Will install with pip.')\n import pip\n pip.main(['install', 'wget'])\n import wget\n\n# The mathematical computation package:\nimport numpy as np\nfrom numpy import exp, abs, sqrt, sum, real, imag, arctan2, append\n\n# The package used for creating and manipulating HDF5 files:\nimport h5py\n\n# Packages for plotting:\nimport matplotlib.pyplot as plt\n\n# Finally import pycroscopy for certain scientific analysis:\ntry:\n import pycroscopy as px\nexcept ImportError:\n warn('pycroscopy not found. Will install with pip.')\n import pip\n pip.main(['install', 'pycroscopy'])\n import pycroscopy as px\n\n\nfield_names = ['Amplitude [V]', 'Frequency [Hz]', 'Quality Factor', 'Phase [rad]']\nsho32 = np.dtype({'names': field_names,\n 'formats': [np.float32 for name in field_names]})"
@@ -45,13 +65,13 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"class ShoGuess(px.Process):\n\n def __init__(self, h5_main, cores=None):\n \"\"\"\n Validate the inputs and set some parameters\n\n Parameters\n ----------\n h5_main - dataset to compute on\n cores - Number of CPU cores to use for computation - Optional\n \"\"\"\n super(ShoGuess, self).__init__(h5_main, cores)\n\n # find the frequency vector\n h5_spec_vals = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Values')[-1]\n self.freq_vec = np.squeeze(h5_spec_vals.value) * 1E-3\n\n def _create_results_datasets(self):\n \"\"\"\n Creates the datasets an datagroups necessary to store the results.\n Just as the raw data is stored in the pycroscopy format, the results also need to conform to the same\n standards. Hence, the create_datasets function can appear to be a little longer than one might expect.\n \"\"\"\n h5_spec_inds = px.hdf_utils.getAuxData(self.h5_main, auxDataName=['Spectroscopic_Indices'])[0]\n h5_spec_vals = px.hdf_utils.getAuxData(self.h5_main, auxDataName=['Spectroscopic_Values'])[0]\n\n self.step_start_inds = np.where(h5_spec_inds[0] == 0)[0]\n self.num_udvs_steps = len(self.step_start_inds)\n \n ds_guess = px.MicroDataset('Guess', data=[],\n maxshape=(self.h5_main.shape[0], self.num_udvs_steps),\n chunking=(1, self.num_udvs_steps), dtype=sho32)\n\n not_freq = px.hdf_utils.get_attr(h5_spec_inds, 'labels') != 'Frequency'\n\n ds_sho_inds, ds_sho_vals = px.hdf_utils.buildReducedSpec(h5_spec_inds, h5_spec_vals, not_freq,\n self.step_start_inds)\n\n dset_name = self.h5_main.name.split('/')[-1]\n sho_grp = px.MicroDataGroup('-'.join([dset_name, 'SHO_Fit_']), self.h5_main.parent.name[1:])\n sho_grp.addChildren([ds_guess, ds_sho_inds, ds_sho_vals])\n sho_grp.attrs['SHO_guess_method'] = \"pycroscopy BESHO\"\n\n h5_sho_grp_refs = self.hdf.writeData(sho_grp)\n\n self.h5_guess = px.hdf_utils.getH5DsetRefs(['Guess'], h5_sho_grp_refs)[0]\n self.h5_results_grp = self.h5_guess.parent\n h5_sho_inds = px.hdf_utils.getH5DsetRefs(['Spectroscopic_Indices'],\n h5_sho_grp_refs)[0]\n h5_sho_vals = px.hdf_utils.getH5DsetRefs(['Spectroscopic_Values'],\n h5_sho_grp_refs)[0]\n\n # Reference linking before actual fitting\n px.hdf_utils.linkRefs(self.h5_guess, [h5_sho_inds, h5_sho_vals])\n # Linking ancillary position datasets:\n aux_dsets = px.hdf_utils.getAuxData(self.h5_main, auxDataName=['Position_Indices', 'Position_Values'])\n px.hdf_utils.linkRefs(self.h5_guess, aux_dsets)\n print('Finshed creating datasets')\n\n def compute(self, *args, **kwargs):\n \"\"\"\n Apply the unit_function to the entire dataset. Here, we simply extend the existing compute function and only\n pass the parameters for the unit function. In this case, the only parameter is the frequency vector.\n\n Parameters\n ----------\n args\n kwargs\n\n Returns\n -------\n\n \"\"\"\n return super(ShoGuess, self).compute(w_vec=self.freq_vec)\n\n def _write_results_chunk(self):\n \"\"\"\n Write the computed results back to the H5 file\n \"\"\"\n # converting from a list to a 2D numpy array\n self._results = np.array(self._results, dtype=np.float32)\n self.h5_guess[:, 0] = px.io_utils.realToCompound(self._results, sho32)\n\n # Now update the start position\n self._start_pos = self._end_pos\n # this should stop the computation.\n\n @staticmethod\n def _unit_function():\n\n return px.be_sho.SHOestimateGuess"
"class ShoGuess(px.Process):\n\n def __init__(self, h5_main, cores=None):\n \"\"\"\n Validate the inputs and set some parameters\n\n Parameters\n ----------\n h5_main - dataset to compute on\n cores - Number of CPU cores to use for computation - Optional\n \"\"\"\n super(ShoGuess, self).__init__(h5_main, cores=cores)\n\n # find the frequency vector\n h5_spec_vals = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Values')[-1]\n self.freq_vec = np.squeeze(h5_spec_vals.value) * 1E-3\n\n def _create_results_datasets(self):\n \"\"\"\n Creates the datasets an datagroups necessary to store the results.\n Just as the raw data is stored in the pycroscopy format, the results also need to conform to the same\n standards. Hence, the create_datasets function can appear to be a little longer than one might expect.\n \"\"\"\n h5_spec_inds = px.hdf_utils.getAuxData(self.h5_main, auxDataName=['Spectroscopic_Indices'])[0]\n h5_spec_vals = px.hdf_utils.getAuxData(self.h5_main, auxDataName=['Spectroscopic_Values'])[0]\n\n self.step_start_inds = np.where(h5_spec_inds[0] == 0)[0]\n self.num_udvs_steps = len(self.step_start_inds)\n \n ds_guess = px.MicroDataset('Guess', data=[],\n maxshape=(self.h5_main.shape[0], self.num_udvs_steps),\n chunking=(1, self.num_udvs_steps), dtype=sho32)\n\n not_freq = px.hdf_utils.get_attr(h5_spec_inds, 'labels') != 'Frequency'\n\n ds_sho_inds, ds_sho_vals = px.hdf_utils.buildReducedSpec(h5_spec_inds, h5_spec_vals, not_freq,\n self.step_start_inds)\n\n dset_name = self.h5_main.name.split('/')[-1]\n sho_grp = px.MicroDataGroup('-'.join([dset_name, 'SHO_Fit_']), self.h5_main.parent.name[1:])\n sho_grp.addChildren([ds_guess, ds_sho_inds, ds_sho_vals])\n sho_grp.attrs['SHO_guess_method'] = \"pycroscopy BESHO\"\n\n h5_sho_grp_refs = self.hdf.writeData(sho_grp)\n\n self.h5_guess = px.hdf_utils.getH5DsetRefs(['Guess'], h5_sho_grp_refs)[0]\n self.h5_results_grp = self.h5_guess.parent\n h5_sho_inds = px.hdf_utils.getH5DsetRefs(['Spectroscopic_Indices'],\n h5_sho_grp_refs)[0]\n h5_sho_vals = px.hdf_utils.getH5DsetRefs(['Spectroscopic_Values'],\n h5_sho_grp_refs)[0]\n\n # Reference linking before actual fitting\n px.hdf_utils.linkRefs(self.h5_guess, [h5_sho_inds, h5_sho_vals])\n # Linking ancillary position datasets:\n aux_dsets = px.hdf_utils.getAuxData(self.h5_main, auxDataName=['Position_Indices', 'Position_Values'])\n px.hdf_utils.linkRefs(self.h5_guess, aux_dsets)\n print('Finshed creating datasets')\n\n def compute(self, *args, **kwargs):\n \"\"\"\n Apply the unit_function to the entire dataset. Here, we simply extend the existing compute function and only\n pass the parameters for the unit function. In this case, the only parameter is the frequency vector.\n\n Parameters\n ----------\n args\n kwargs\n\n Returns\n -------\n\n \"\"\"\n return super(ShoGuess, self).compute(w_vec=self.freq_vec)\n\n def _write_results_chunk(self):\n \"\"\"\n Write the computed results back to the H5 file\n \"\"\"\n # converting from a list to a 2D numpy array\n self._results = np.array(self._results, dtype=np.float32)\n self.h5_guess[:, 0] = px.io_utils.realToCompound(self._results, sho32)\n\n # Now update the start position\n self._start_pos = self._end_pos\n # this should stop the computation.\n\n @staticmethod\n def _unit_function():\n\n return px.be_sho.SHOestimateGuess"
]
},
{
@@ -63,10 +83,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"# download the raw data file from Github:\nh5_path = 'temp.h5'\nurl = 'https://raw.githubusercontent.com/pycroscopy/pycroscopy/master/data/BELine_0004.h5'\nif os.path.exists(h5_path):\n os.remove(h5_path)\n_ = wget.download(url, h5_path, bar=None)"
@@ -74,10 +94,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"# Open the file in read-only mode\nh5_file = h5py.File(h5_path, mode='r+')\n\n# Get handles to the the raw data along with other datasets and datagroups that contain necessary parameters\nh5_meas_grp = h5_file['Measurement_000']\nnum_rows = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_rows')\nnum_cols = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_cols')\n\n# Getting a reference to the main dataset:\nh5_main = h5_meas_grp['Channel_000/Raw_Data']\n\n# Extracting the X axis - vector of frequencies\nh5_spec_vals = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Values')[-1]\nfreq_vec = np.squeeze(h5_spec_vals.value) * 1E-3"
@@ -92,10 +112,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"fitter = ShoGuess(h5_main, cores=1)\nh5_results_grp = fitter.compute()\nh5_guess = h5_results_grp['Guess']\n\nrow_ind, col_ind = 103, 19\npix_ind = col_ind + row_ind * num_cols\nresp_vec = h5_main[pix_ind]\nnorm_guess_parms = h5_guess[pix_ind]\n\n# Converting from compound to real:\nnorm_guess_parms = px.io_utils.compound_to_scalar(norm_guess_parms)\nprint('Functional fit returned:', norm_guess_parms)\nnorm_resp = px.be_sho.SHOfunc(norm_guess_parms, freq_vec)"
@@ -110,10 +130,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"fig, axes = plt.subplots(nrows=2, sharex=True, figsize=(5, 10))\nfor axis, func, title in zip(axes.flat, [np.abs, np.angle], ['Amplitude (a.u.)', 'Phase (rad)']):\n axis.scatter(freq_vec, func(resp_vec), c='red', label='Measured')\n axis.plot(freq_vec, func(norm_resp), 'black', lw=3, label='Guess')\n axis.set_title(title, fontsize=16)\n axis.legend(fontsize=14)\n\naxes[1].set_xlabel('Frequency (kHz)', fontsize=14)\naxes[0].set_ylim([0, np.max(np.abs(resp_vec)) * 1.1])\naxes[1].set_ylim([-np.pi, np.pi])"
@@ -128,35 +148,15 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null,
"outputs": [],
"source": [
"h5_file.close()\nos.remove(h5_path)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
\ No newline at end of file
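One detail of the notebook above worth unpacking is the compound dtype: all four SHO parameters are stored per pixel as a single structured record. Below is a small sketch of the packing step; the loop illustrates what px.io_utils.realToCompound is used for in _write_results_chunk, not pycroscopy's actual implementation::

    import numpy as np

    # The structured dtype defined in the notebook above
    field_names = ['Amplitude [V]', 'Frequency [Hz]', 'Quality Factor', 'Phase [rad]']
    sho32 = np.dtype({'names': field_names,
                      'formats': [np.float32 for _ in field_names]})

    # A plain (num_pixels, 4) array of guess parameters...
    flat_results = np.random.rand(5, 4).astype(np.float32)

    # ...packed into one structured record per pixel
    packed = np.zeros(flat_results.shape[0], dtype=sho32)
    for col, name in enumerate(field_names):
        packed[name] = flat_results[:, col]

    print(packed.dtype.names)
    print(packed['Amplitude [V]'])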
"""
-=================================================================
+=======================================
Tutorial 5: Formalizing Data Processing
-=================================================================
+=======================================
**Suhas Somnath**
@@ -110,7 +110,7 @@ class ShoGuess(px.Process):
h5_main - dataset to compute on
cores - Number of CPU cores to use for computation - Optional
"""
-super(ShoGuess, self).__init__(h5_main, cores)
+super(ShoGuess, self).__init__(h5_main, cores=cores)
# find the frequency vector
h5_spec_vals = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Values')[-1]
...
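The substantive code change in this commit is the switch from the positional call super(ShoGuess, self).__init__(h5_main, cores) to the keyword form cores=cores. A minimal sketch of why the keyword form is safer; the base-class signature below is a hypothetical stand-in, not px.Process's actual one::

    class Process(object):
        # Hypothetical stand-in for px.Process. Imagine a parameter being
        # added before `cores` in a later release:
        def __init__(self, h5_main, verbose=False, cores=None):
            self.h5_main = h5_main
            self.verbose = verbose
            self.cores = cores

    class ShoGuess(Process):
        def __init__(self, h5_main, cores=None):
            # A positional __init__(h5_main, cores) would now silently bind
            # `cores` to `verbose`; the keyword form keeps the binding correct:
            super(ShoGuess, self).__init__(h5_main, cores=cores)

    fitter = ShoGuess('dummy_dataset', cores=4)
    print(fitter.cores, fitter.verbose)  # -> 4 False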
-b11b6c6272c5cb4b8cb9e52cfb61aeec
\ No newline at end of file
+bcb118971b2ddcfa63329f0d1fb567a0
\ No newline at end of file
@@ -3,9 +3,9 @@
.. _sphx_glr_auto_examples_dev_tutorials_plot_tutorial_05_data_processing.py:
-=================================================================
+=======================================
Tutorial 5: Formalizing Data Processing
-=================================================================
+=======================================
**Suhas Somnath**
@@ -128,7 +128,7 @@ Note that:
h5_main - dataset to compute on
cores - Number of CPU cores to use for computation - Optional
"""
-super(ShoGuess, self).__init__(h5_main, cores)
+super(ShoGuess, self).__init__(h5_main, cores=cores)
# find the frequency vector
h5_spec_vals = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Values')[-1]
@@ -345,7 +345,7 @@ Plot the Amplitude and Phase of the gaussian versus the raw data.
-**Total running time of the script:** ( 0 minutes 43.870 seconds)
+**Total running time of the script:** ( 0 minutes 44.650 seconds)
...