Commit d4bb3f05 authored by Unknown

Categorize publication list by instrument

parent a6cc60df
{
"cells": [
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%matplotlib inline"
]
......@@ -19,12 +19,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Ensure python 3 compatibility:\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n\n# The package for accessing files in directories, etc.:\nimport os\n\n# Warning package in case something goes wrong\nfrom warnings import warn\n\n# Package for downloading online files:\ntry:\n # This package is not part of anaconda and may need to be installed.\n import wget\nexcept ImportError:\n warn('wget not found. Will install with pip.')\n import pip\n pip.main(['install', 'wget'])\n import wget\n\n# The mathematical computation package:\nimport numpy as np\n\n# The package used for creating and manipulating HDF5 files:\nimport h5py\n\n# Packages for plotting:\nimport matplotlib.pyplot as plt\n\n# Finally import pycroscopy for certain scientific analysis:\ntry:\n import pycroscopy as px\nexcept ImportError:\n warn('pycroscopy not found. Will install with pip.')\n import pip\n pip.main(['install', 'pycroscopy'])\n import pycroscopy as px"
]
......@@ -37,12 +37,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"url = 'https://raw.githubusercontent.com/pycroscopy/pycroscopy/master/data/STS.asc'\ndata_file_path = 'temp_1.asc'\nif os.path.exists(data_file_path):\n os.remove(data_file_path)\n_ = wget.download(url, data_file_path, bar=None)"
]
......@@ -55,12 +55,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"with open(data_file_path, 'r') as file_handle:\n for lin_ind in range(10):\n print(file_handle.readline())"
]
......@@ -73,12 +73,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Extracting the raw data into memory\nfile_handle = open(data_file_path, 'r')\nstring_lines = file_handle.readlines()\nfile_handle.close()"
]
......@@ -91,12 +91,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Reading parameters stored in the first few rows of the file\nparm_dict = dict()\nfor line in string_lines[3:17]:\n line = line.replace('# ', '')\n line = line.replace('\\n', '')\n temp = line.split('=')\n test = temp[1].strip()\n try:\n test = float(test)\n # convert those values that should be integers:\n if test % 1 == 0:\n test = int(test)\n except ValueError:\n pass\n parm_dict[temp[0].strip()] = test\n\n# Print out the parameters extracted\nfor key in parm_dict.keys():\n print(key, ':\\t', parm_dict[key])"
]
......@@ -109,12 +109,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"num_rows = int(parm_dict['y-pixels'])\nnum_cols = int(parm_dict['x-pixels'])\nnum_pos = num_rows * num_cols\nspectra_length = int(parm_dict['z-points'])"
]
......@@ -127,12 +127,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# num_headers = len(string_lines) - num_pos\nnum_headers = 403\n\n# Extract the STS data from subsequent lines\nraw_data_2d = np.zeros(shape=(num_pos, spectra_length), dtype=np.float32)\nfor line_ind in range(num_pos):\n this_line = string_lines[num_headers + line_ind]\n string_spectrum = this_line.split('\\t')[:-1] # omitting the new line\n raw_data_2d[line_ind] = np.array(string_spectrum, dtype=np.float32)"
]
......@@ -145,12 +145,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"max_v = 1 # This is the one parameter we are not sure about\n\nfolder_path, file_name = os.path.split(data_file_path)\nfile_name = file_name[:-4] + '_'\n\n# Generate the x / voltage / spectroscopic axis:\nvolt_vec = np.linspace(-1 * max_v, 1 * max_v, spectra_length)\n\nh5_path = os.path.join(folder_path, file_name + '.h5')"
]
......@@ -163,12 +163,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"tran = px.io.NumpyTranslator()\nh5_path = tran.translate(h5_path, raw_data_2d, num_rows, num_cols,\n qty_name='Current', data_unit='nA', spec_name='Bias',\n spec_unit='V', spec_val=volt_vec, scan_height=100,\n scan_width=200, spatial_unit='nm', data_type='STS',\n translator_name='ASC', parms_dict=parm_dict)"
]
......@@ -181,36 +181,36 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"with h5py.File(h5_path, mode='r') as h5_file:\n # See if a tree has been created within the hdf5 file:\n px.hdf_utils.print_tree(h5_file)\n\n h5_main = h5_file['Measurement_000/Channel_000/Raw_Data']\n fig, axes = plt.subplots(ncols=2, figsize=(11, 5))\n spat_map = np.reshape(h5_main[:, 100], (100, 100))\n px.plot_utils.plot_map(axes[0], spat_map, origin='lower')\n axes[0].set_title('Spatial map')\n axes[0].set_xlabel('X')\n axes[0].set_ylabel('Y')\n axes[1].plot(np.linspace(-1.0, 1.0, h5_main.shape[1]),\n h5_main[250])\n axes[1].set_title('IV curve at a single pixel')\n axes[1].set_xlabel('Tip bias [V]')\n axes[1].set_ylabel('Current [nA]')\n\n# Remove both the original and translated files:\nos.remove(h5_path)\nos.remove(data_file_path)"
]
}
],
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"version": 3,
"name": "ipython"
"name": "ipython",
"version": 3
},
"nbconvert_exporter": "python",
"file_extension": ".py",
"version": "3.5.2",
"pygments_lexer": "ipython3",
"mimetype": "text/x-python",
"name": "python",
"mimetype": "text/x-python"
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
}
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
......@@ -249,20 +249,20 @@ The parameters in these files are present in the first few lines of the file
Out::
voidpixels : 0
x-pixels : 100
y-pixels : 100
x-length : 29.7595
y-length : 29.7595
x-offset : -967.807
y-offset : -781.441
z-points : 500
z-section : 491
z-unit : nV
z-range : 2000000000
z-offset : 1116.49
value-unit : nA
scanspeed : 59519000000
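
Each header line follows the pattern ``# key = value``. A minimal standalone
sketch of the parsing step (mirroring the notebook cell; the line range
``[3:17]`` and the file name ``temp_1.asc`` are taken from this example)::

    parm_dict = dict()
    with open('temp_1.asc', 'r') as file_handle:
        for line in file_handle.readlines()[3:17]:
            key, _, value = line.replace('# ', '').strip().partition('=')
            value = value.strip()
            try:
                value = float(value)
                if value % 1 == 0:  # store whole numbers as integers
                    value = int(value)
            except ValueError:
                pass  # non-numeric values (e.g. 'nA') stay as strings
            parm_dict[key.strip()] = value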
3.a Prepare to read the data
......@@ -418,7 +418,7 @@ Verifying the newly written H5 file:
Measurement_000/Channel_000/Spectroscopic_Values
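
The same check can be performed with plain ``h5py``, without pycroscopy's
``print_tree`` helper (a minimal sketch; ``h5_path`` as defined above)::

    import h5py

    with h5py.File(h5_path, mode='r') as h5_file:
        # visit() walks every group and dataset and prints its path
        h5_file.visit(print)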
**Total running time of the script:** ( 0 minutes 4.833 seconds)
......
{
"cells": [
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%matplotlib inline"
]
......@@ -19,12 +19,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Ensure python 3 compatibility:\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n\n# The package for accessing files in directories, etc.:\nimport os\n\n# Warning package in case something goes wrong\nfrom warnings import warn\n\n# Package for downloading online files:\ntry:\n # This package is not part of anaconda and may need to be installed.\n import wget\nexcept ImportError:\n warn('wget not found. Will install with pip.')\n import pip\n pip.main(['install', 'wget'])\n import wget\n\n# The mathematical computation package:\nimport numpy as np\n\n# The package used for creating and manipulating HDF5 files:\nimport h5py\n\n# Packages for plotting:\nimport matplotlib.pyplot as plt\n\n# Package for performing k-Means clustering:\nfrom sklearn.cluster import KMeans\n\n# Finally import pycroscopy for certain scientific analysis:\ntry:\n import pycroscopy as px\nexcept ImportError:\n warn('pycroscopy not found. Will install with pip.')\n import pip\n pip.main(['install', 'pycroscopy'])\n import pycroscopy as px\nfrom pycroscopy.io.translators.omicron_asc import AscTranslator"
]
......@@ -37,12 +37,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# download the raw data file from Github:\ndata_file_path = 'temp_2.asc'\nurl = 'https://raw.githubusercontent.com/pycroscopy/pycroscopy/master/data/STS.asc'\nif os.path.exists(data_file_path):\n os.remove(data_file_path)\n_ = wget.download(url, data_file_path, bar=None)\n\n# Translating from raw data to h5:\ntran = AscTranslator()\nh5_path = tran.translate(data_file_path)"
]
......@@ -55,12 +55,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# opening the file:\nhdf = px.ioHDF5(h5_path)\nh5_file = hdf.file\n\n# Visualize the tree structure in the file\nprint('Tree structure within the file:')\npx.hdf_utils.print_tree(h5_file)\n\n# Extracting some parameters that will be necessary later on:\nh5_meas_grp = h5_file['Measurement_000']\nnum_cols = int(px.hdf_utils.get_attr(h5_meas_grp, 'x-pixels'))\nnum_rows = int(px.hdf_utils.get_attr(h5_meas_grp, 'y-pixels'))\n\n# There are multiple ways of accessing the Raw_Data dataset. Here's one approach:\nh5_main = h5_meas_grp['Channel_000/Raw_Data']\n\n# Prepare the label for plots:\ny_label = px.hdf_utils.get_attr(h5_main, 'quantity') + ' [' + px.hdf_utils.get_attr(h5_main, 'units') + ']'\n\n# Get the voltage vector that this data was acquired as a function of:\nh5_spec_vals = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Values')[0]\nvolt_vec = np.squeeze(h5_spec_vals[()])\n\n# Get the descriptor for this\nx_label = px.hdf_utils.get_attr(h5_spec_vals, 'labels')[0] + ' [' + px.hdf_utils.get_attr(h5_spec_vals, 'units')[0] + ']'\n\n# Currently, the data is within the h5 dataset. We need to read this to memory:\ndata_mat = h5_main[()]\n\nprint('\\nData now loaded to memory and is of shape:', data_mat.shape)\nprint('Data has', num_rows, 'rows and', num_cols, 'columns each having a',\n data_mat.shape[1], 'long measurement of', y_label,'as a function of', x_label)"
]
......@@ -73,12 +73,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"num_clusters = 9\n\n# Now, we can perform k-Means clustering:\nestimators = KMeans(num_clusters)\nresults = estimators.fit(data_mat)\n\nprint('K-Means Clustering performed on the dataset of shape', data_mat.shape,\n 'resulted in a cluster centers matrix of shape', results.cluster_centers_.shape,\n 'and a labels array of shape', results.labels_.shape)\n\n\"\"\"\nBy default, the clusters identified by K-Means are NOT arranged according to their relative \ndistances to each other. Visualizing and interpreting this data is challenging. We will sort the \nresults using a handy function already in pycroscopy:\n\"\"\"\nlabels, centroids = px.processing.cluster.reorder_clusters(results.labels_, results.cluster_centers_)"
]
......@@ -91,12 +91,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"px.plot_utils.plot_cluster_results_together(np.reshape(results.labels_, (num_rows, num_cols)),\n results.cluster_centers_, spec_val=volt_vec, cmap=plt.cm.inferno,\n spec_label=x_label, resp_label=y_label);\n\npx.plot_utils.plot_cluster_results_together(np.reshape(labels, (num_rows, num_cols)),\n centroids, spec_val=volt_vec, cmap=plt.cm.inferno,\n spec_label=x_label, resp_label=y_label);"
]
......@@ -116,12 +116,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"ds_labels_spec_inds, ds_labels_spec_vals = px.io.translators.utils.build_ind_val_dsets([1], labels=['Label'])\nds_cluster_inds, ds_cluster_vals = px.io.translators.utils.build_ind_val_dsets([centroids.shape[0]], is_spectral=False,\n labels=['Cluster'])\nlabels_mat = np.uint32(labels.reshape([-1, 1]))\n\n# Rename the datasets\nds_labels_spec_inds.name = 'Label_Spectroscopic_Indices'\nds_labels_spec_vals.name = 'Label_Spectroscopic_Values'\nds_cluster_inds.name = 'Cluster_Indices'\nds_cluster_vals.name = 'Cluster_Values'\n\nprint('Spectroscopic Dataset for Labels', ds_labels_spec_inds.shape)\nprint('Position Dataset for Centroids', ds_cluster_inds.shape)\nprint('Centroids',centroids.shape)\nprint('Labels', labels_mat.shape)"
]
......@@ -134,12 +134,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# The two main datasets\nds_label_mat = px.MicroDataset('Labels', labels_mat, dtype=np.uint32)\n# Adding the mandatory attributes\nds_label_mat.attrs = {'quantity': 'Cluster ID', 'units': 'a. u.'}\n\nds_cluster_centroids = px.MicroDataset('Mean_Response', centroids, dtype=h5_main.dtype)\n# Inhereting / copying the mandatory attributes\npx.hdf_utils.copy_main_attributes(h5_main, ds_cluster_centroids)"
]
......@@ -152,12 +152,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"source_dset_name = h5_main.name.split('/')[-1]\noperation_name = 'Cluster'\n\nsubtree_root_path = h5_main.parent.name[1:]\n\ncluster_grp = px.MicroDataGroup(source_dset_name + '-' + operation_name + '_',\n subtree_root_path)\nprint('New group to be created with name:', cluster_grp.name)\nprint('This group (subtree) will be appended to the H5 file under the group:', subtree_root_path)\n\n# Making a tree structure by adding the MicroDataset objects as children of this group\ncluster_grp.addChildren([ds_label_mat, ds_cluster_centroids, ds_cluster_inds, ds_cluster_vals, ds_labels_spec_inds,\n ds_labels_spec_vals])\n\nprint('\\nWill write the following tree:')\ncluster_grp.showTree()\n\ncluster_grp.attrs['num_clusters'] = num_clusters\ncluster_grp.attrs['num_samples'] = h5_main.shape[0]\ncluster_grp.attrs['cluster_algorithm'] = 'KMeans'\n\n# Get the parameters of the KMeans object that was used and write them as attributes of the group\nfor parm in estimators.get_params().keys():\n cluster_grp.attrs[parm] = estimators.get_params()[parm]\n\nprint('\\nWriting the following attrbutes to the group:')\nfor at_name in cluster_grp.attrs:\n print(at_name, ':', cluster_grp.attrs[at_name])"
]
......@@ -170,12 +170,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"h5_clust_refs = hdf.writeData(cluster_grp, print_log=True)\n\nh5_labels = px.hdf_utils.getH5DsetRefs(['Labels'], h5_clust_refs)[0]\nh5_centroids = px.hdf_utils.getH5DsetRefs(['Mean_Response'], h5_clust_refs)[0]\nh5_clust_inds = px.hdf_utils.getH5DsetRefs(['Cluster_Indices'], h5_clust_refs)[0]\nh5_clust_vals = px.hdf_utils.getH5DsetRefs(['Cluster_Values'], h5_clust_refs)[0]\nh5_label_inds = px.hdf_utils.getH5DsetRefs(['Label_Spectroscopic_Indices'], h5_clust_refs)[0]\nh5_label_vals = px.hdf_utils.getH5DsetRefs(['Label_Spectroscopic_Values'], h5_clust_refs)[0]"
]
......@@ -188,12 +188,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"px.hdf_utils.print_tree(h5_file)"
]
......@@ -206,12 +206,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# we already got the reference to the spectroscopic values in the first few cells\nh5_spec_inds = px.hdf_utils.getAuxData(h5_main, 'Spectroscopic_Indices')[0]\n\npx.hdf_utils.checkAndLinkAncillary(h5_labels,\n ['Position_Indices', 'Position_Values'],\n h5_main=h5_main)\npx.hdf_utils.checkAndLinkAncillary(h5_labels,\n ['Spectroscopic_Indices', 'Spectroscopic_Values'],\n anc_refs=[h5_label_inds, h5_label_vals])\n\npx.hdf_utils.checkAndLinkAncillary(h5_centroids,\n ['Spectroscopic_Indices', 'Spectroscopic_Values'],\n anc_refs=[h5_spec_inds, h5_spec_vals])\n\npx.hdf_utils.checkAndLinkAncillary(h5_centroids,\n ['Position_Indices', 'Position_Values'],\n anc_refs=[h5_clust_inds, h5_clust_vals])"
]
......@@ -224,12 +224,12 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"px.plot_utils.plot_cluster_h5_group(h5_labels.parent, '');"
]
......@@ -242,36 +242,36 @@
]
},
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"os.remove(data_file_path)\nhdf.close()\nos.remove(h5_path)"
]
}
],
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"version": 3,
"name": "ipython"
"name": "ipython",
"version": 3
},
"nbconvert_exporter": "python",
"file_extension": ".py",
"version": "3.5.2",
"pygments_lexer": "ipython3",
"mimetype": "text/x-python",
"name": "python",
"mimetype": "text/x-python"
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
}
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
......@@ -500,22 +500,22 @@ operation being performed on the same dataset. The index will then be updated ac
Measurement_000/Channel_000Raw_Data-Cluster_/Label_Spectroscopic_Values
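
The trailing index in names such as ``Raw_Data-Cluster_000`` is what gets
incremented when the same operation is repeated on the dataset. A hedged
sketch of how such an index could be derived with plain ``h5py``
(``h5_channel`` is assumed to be the ``Channel_000`` group; pycroscopy's own
writer may differ in detail)::

    base_name = 'Raw_Data-Cluster_'
    existing = [key for key in h5_channel.keys() if key.startswith(base_name)]
    group_name = '{}{:03d}'.format(base_name, len(existing))  # _000, _001, ...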
Writing the following attributes to the group:
timestamp : 2017_12_04-14_18_59
machine_id : challtdow-ThinkPad-T530
num_clusters : 9
num_samples : 10000
cluster_algorithm : KMeans
algorithm : auto
copy_x : True
init : k-means++
max_iter : 300
n_clusters : 9
n_init : 10
n_jobs : 1
precompute_distances : auto
random_state : None
tol : 0.0001
verbose : 0
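
For reference, the same attribute bookkeeping can be reproduced with ``h5py``
alone. A minimal sketch, assuming ``estimators`` is the fitted ``KMeans``
object and ``h5_group`` the target HDF5 group (``None`` must be converted to
a string, since HDF5 attributes cannot store ``None``)::

    for parm, value in estimators.get_params().items():
        h5_group.attrs[parm] = 'None' if value is None else value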
Write to H5 and access the written objects
......@@ -546,35 +546,35 @@ Once the tree is prepared (previous cell), ioHDF5 will handle all the file writi
Out::
Created group /Measurement_000/Channel_000/Raw_Data-Cluster_000
Writing attribute: timestamp with value: 2017_12_04-14_18_59
Writing attribute: machine_id with value: challtdow-ThinkPad-T530
Writing attribute: num_clusters with value: 9
Writing attribute: num_samples with value: 10000
Writing attribute: cluster_algorithm with value: KMeans
Writing attribute: algorithm with value: auto
Writing attribute: copy_x with value: True
Writing attribute: init with value: k-means++
Writing attribute: max_iter with value: 300
Writing attribute: n_clusters with value: 9
Writing attribute: n_init with value: 10
Writing attribute: n_jobs with value: 1
Writing attribute: precompute_distances with value: auto
Writing attribute: tol with value: 0.0001
Writing attribute: verbose with value: 0
Wrote attributes to group: Raw_Data-Cluster_000
Created Dataset /Measurement_000/Channel_000/Raw_Data-Cluster_000/Labels
Writing attribute: quantity with value: Cluster ID
Writing attribute: units with value: a. u.
Wrote Attributes of Dataset Labels
Created Dataset /Measurement_000/Channel_000/Raw_Data-Cluster_000/Mean_Response
Writing attribute: quantity with value: Current
Writing attribute: units with value: nA
Wrote Attributes of Dataset Mean_Response
Created Dataset /Measurement_000/Channel_000/Raw_Data-Cluster_000/Cluster_Indices
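
Note how ``Mean_Response`` inherits the ``quantity`` and ``units`` attributes
of ``Raw_Data`` via ``copy_main_attributes`` in the cell above. The manual
equivalent is a plain copy (sketch; ``h5_centroids`` and ``h5_main`` as in
this example)::

    for key in ('quantity', 'units'):
        h5_centroids.attrs[key] = h5_main.attrs[key]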
......@@ -724,7 +724,7 @@ Deletes the temporary files created in the example
**Total running time of the script:** ( 0 minutes 14.929 seconds)
......
{
"cells": [
{
"execution_count": null,
"cell_type": "code",
"outputs": [],
"execution_count": null,
"metadata": {
"collapsed": false