diff --git a/Code/Mantid/Framework/API/inc/MantidAPI/DataProcessorAlgorithm.h b/Code/Mantid/Framework/API/inc/MantidAPI/DataProcessorAlgorithm.h
index 2cc4b78212ec9c2cdc8f8974d17eeaf7bda6f787..143670ddf945b8fe7abf7ca36f6aba07453d7662 100644
--- a/Code/Mantid/Framework/API/inc/MantidAPI/DataProcessorAlgorithm.h
+++ b/Code/Mantid/Framework/API/inc/MantidAPI/DataProcessorAlgorithm.h
@@ -56,8 +56,9 @@ protected:
   void setAccumAlg(const std::string &alg);
   void setPropManagerPropName(const std::string &propName);
   void mapPropertyName(const std::string &nameInProp,
                        const std::string &nameInPropManager);
-  ITableWorkspace_sptr determineChunk();
-  void loadChunk();
+  void copyProperty(API::Algorithm_sptr alg, const std::string& name);
+  virtual ITableWorkspace_sptr determineChunk(const std::string &filename);
+  virtual MatrixWorkspace_sptr loadChunk(const size_t rowIndex);
   Workspace_sptr load(const std::string &inputData, const bool loadQuiet = false);
   std::vector<std::string> splitInput(const std::string &input);
diff --git a/Code/Mantid/Framework/API/src/DataProcessorAlgorithm.cpp b/Code/Mantid/Framework/API/src/DataProcessorAlgorithm.cpp
index 227a84bea1a4ecebc6e482af758647c1c7868d75..c28b98018000c5c35e0af37b7c99add5bfc7073a 100644
--- a/Code/Mantid/Framework/API/src/DataProcessorAlgorithm.cpp
+++ b/Code/Mantid/Framework/API/src/DataProcessorAlgorithm.cpp
@@ -109,6 +109,27 @@ void DataProcessorAlgorithm::mapPropertyName(const std::string &nameInProp,
   m_nameToPMName[nameInProp] = nameInPropManager;
 }
+/**
+ * Copy a property from an existing algorithm.
+ *
+ * @warning This only works if your algorithm is in the WorkflowAlgorithms sub-project.
+ *
+ * @param alg The algorithm to copy the property from
+ * @param name The name of the property to copy
+ *
+ * @throws std::runtime_error If you ask to copy a non-existent property
+ */
+void DataProcessorAlgorithm::copyProperty(API::Algorithm_sptr alg, const std::string& name) {
+  if (! alg->existsProperty(name)) {
+    std::stringstream msg;
+    msg << "Algorithm \"" << alg->name() << "\" does not have property \"" << name << "\"";
+    throw std::runtime_error(msg.str());
+  }
+
+  auto prop = alg->getPointerToProperty(name);
+  declareProperty(prop->clone(), prop->documentation());
+}
+
 /**
  * Get the property held by this object. If the value is the default, see if it
  * is contained in the PropertyManager.
  * @see Algorithm::getPropertyValue(const string &)
@@ -161,12 +182,15 @@ PropertyManagerOwner::TypedValue DataProcessorAlgorithm::getProperty(const std::
   return Algorithm::getProperty(name);
 }
 
-ITableWorkspace_sptr DataProcessorAlgorithm::determineChunk() {
+ITableWorkspace_sptr DataProcessorAlgorithm::determineChunk(const std::string &filename) {
+  UNUSED_ARG(filename);
+
   throw std::runtime_error(
       "DataProcessorAlgorithm::determineChunk is not implemented");
 }
 
-void DataProcessorAlgorithm::loadChunk() {
+MatrixWorkspace_sptr DataProcessorAlgorithm::loadChunk(const size_t rowIndex) {
+  UNUSED_ARG(rowIndex);
   throw std::runtime_error(
       "DataProcessorAlgorithm::loadChunk is not implemented");
diff --git a/Code/Mantid/Framework/PythonInterface/inc/MantidPythonInterface/api/PythonAlgorithm/DataProcessorAdapter.h b/Code/Mantid/Framework/PythonInterface/inc/MantidPythonInterface/api/PythonAlgorithm/DataProcessorAdapter.h
index 2d434f6463a9fe2e6807e16893d77b8ac8d3d77b..b198937f1c81f2b118507887fe291b1cde3ab771 100644
--- a/Code/Mantid/Framework/PythonInterface/inc/MantidPythonInterface/api/PythonAlgorithm/DataProcessorAdapter.h
+++ b/Code/Mantid/Framework/PythonInterface/inc/MantidPythonInterface/api/PythonAlgorithm/DataProcessorAdapter.h
@@ -62,11 +62,11 @@ public:
   void setAccumAlgProxy(const std::string &alg) { this->setAccumAlg(alg); }
 
-  API::ITableWorkspace_sptr determineChunkProxy() {
-    return this->determineChunk();
+  API::ITableWorkspace_sptr determineChunkProxy(const std::string &filename) {
+    return this->determineChunk(filename);
   }
 
-  void loadChunkProxy() { this->loadChunk(); }
+  void loadChunkProxy(const size_t rowIndex) { this->loadChunk(rowIndex); }
 
   API::Workspace_sptr loadProxy(const std::string &inputData,
                                 const bool loadQuiet = false) {
diff --git a/Code/Mantid/Framework/WorkflowAlgorithms/CMakeLists.txt b/Code/Mantid/Framework/WorkflowAlgorithms/CMakeLists.txt
index f9bc0b6b5801c1cd08e2ad228c98e5a203a2bee4..8c536b09c7e34ea631e17ab2fb8a7622d8f76b3f 100644
--- a/Code/Mantid/Framework/WorkflowAlgorithms/CMakeLists.txt
+++ b/Code/Mantid/Framework/WorkflowAlgorithms/CMakeLists.txt
@@ -18,6 +18,7 @@ set ( SRC_FILES
 	src/HFIRInstrument.cpp
 	src/HFIRLoad.cpp
 	src/HFIRSANSNormalise.cpp
+	src/LoadEventAndCompress.cpp
 	src/MuonCalculateAsymmetry.cpp
 	src/MuonLoad.cpp
 	src/RefReduction.cpp
@@ -57,6 +58,7 @@ set ( INC_FILES
 	inc/MantidWorkflowAlgorithms/HFIRInstrument.h
 	inc/MantidWorkflowAlgorithms/HFIRLoad.h
 	inc/MantidWorkflowAlgorithms/HFIRSANSNormalise.h
+	inc/MantidWorkflowAlgorithms/LoadEventAndCompress.h
 	inc/MantidWorkflowAlgorithms/MuonCalculateAsymmetry.h
 	inc/MantidWorkflowAlgorithms/MuonLoad.h
 	inc/MantidWorkflowAlgorithms/RefReduction.h
@@ -74,6 +76,7 @@ set ( INC_FILES
 )
 
 set ( TEST_FILES
+	LoadEventAndCompressTest.h
 	MuonCalculateAsymmetryTest.h
 	MuonLoadTest.h
 	SANSSolidAngleCorrectionTest.h
diff --git a/Code/Mantid/Framework/WorkflowAlgorithms/inc/MantidWorkflowAlgorithms/LoadEventAndCompress.h b/Code/Mantid/Framework/WorkflowAlgorithms/inc/MantidWorkflowAlgorithms/LoadEventAndCompress.h
new file mode 100644
index 0000000000000000000000000000000000000000..10768255f19289093c85887e3d7e880601097ab8
--- /dev/null
+++ b/Code/Mantid/Framework/WorkflowAlgorithms/inc/MantidWorkflowAlgorithms/LoadEventAndCompress.h
@@ -0,0 +1,61 @@
+#ifndef MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESS_H_
+#define MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESS_H_
+
+#include "MantidKernel/System.h"
+#include "MantidAPI/DataProcessorAlgorithm.h"
+#include "MantidAPI/ITableWorkspace_fwd.h"
+
+namespace Mantid {
+namespace WorkflowAlgorithms {
+
+/** LoadEventAndCompress : Loads an event NeXus file in chunks and compresses
+    the resulting chunks before summing them into a single output workspace.
+
+  Copyright © 2015 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+class DLLExport LoadEventAndCompress : public API::DataProcessorAlgorithm {
+public:
+  LoadEventAndCompress();
+  virtual ~LoadEventAndCompress();
+
+  virtual const std::string name() const;
+  virtual int version() const;
+  virtual const std::string category() const;
+  virtual const std::string summary() const;
+
+protected:
+  API::ITableWorkspace_sptr determineChunk(const std::string &filename);
+  API::MatrixWorkspace_sptr loadChunk(const size_t rowIndex);
+  void processChunk(API::MatrixWorkspace_sptr wksp);
+
+private:
+  void init();
+  void exec();
+
+  std::string m_filename;
+  double m_filterBadPulses;
+  API::ITableWorkspace_sptr m_chunkingTable;
+};
+
+} // namespace WorkflowAlgorithms
+} // namespace Mantid
+
+#endif /* MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESS_H_ */
diff --git a/Code/Mantid/Framework/WorkflowAlgorithms/src/LoadEventAndCompress.cpp b/Code/Mantid/Framework/WorkflowAlgorithms/src/LoadEventAndCompress.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c603ffdf9da6763846a6d2de7051e3359fb8915e
--- /dev/null
+++ b/Code/Mantid/Framework/WorkflowAlgorithms/src/LoadEventAndCompress.cpp
@@ -0,0 +1,227 @@
+#include "MantidWorkflowAlgorithms/LoadEventAndCompress.h"
+#include "MantidAPI/AlgorithmManager.h"
+#include "MantidAPI/FileProperty.h"
+#include "MantidAPI/FrameworkManager.h"
+#include "MantidAPI/ITableWorkspace.h"
+#include "MantidDataObjects/EventWorkspace.h"
+#include "MantidKernel/ArrayProperty.h"
+#include "MantidKernel/BoundedValidator.h"
+#include "MantidKernel/VisibleWhenProperty.h"
+
+namespace Mantid {
+namespace WorkflowAlgorithms {
+
+using std::size_t;
+using std::string;
+using namespace Kernel;
+using namespace API;
+using namespace DataObjects;
+
+// Register the algorithm into the AlgorithmFactory
+DECLARE_ALGORITHM(LoadEventAndCompress)
+
+//----------------------------------------------------------------------------------------------
+/** Constructor
+ */
+LoadEventAndCompress::LoadEventAndCompress() : m_filterBadPulses(EMPTY_DBL()) {}
+
+//----------------------------------------------------------------------------------------------
+/** Destructor
+ */
+LoadEventAndCompress::~LoadEventAndCompress() {}
+
+//----------------------------------------------------------------------------------------------
+
+/// Algorithm's name for identification. @see Algorithm::name
+const string LoadEventAndCompress::name() const { return "LoadEventAndCompress"; }
+
+/// Algorithm's version for identification. @see Algorithm::version
+int LoadEventAndCompress::version() const { return 1; }
+
+/// Algorithm's category for identification. @see Algorithm::category
+const string LoadEventAndCompress::category() const {
+  return "Workflow\\DataHandling";
+}
+
+/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary
+const string LoadEventAndCompress::summary() const {
+  return "Load an event workspace in chunks and compress";
+}
+
+//----------------------------------------------------------------------------------------------
+/** Initialize the algorithm's properties.
+ */
+void LoadEventAndCompress::init() {
+  // algorithms to copy properties from
+  auto algLoadEventNexus = AlgorithmManager::Instance().createUnmanaged("LoadEventNexus");
+  algLoadEventNexus->initialize();
+  auto algDetermineChunking =
+      AlgorithmManager::Instance().createUnmanaged("DetermineChunking");
+  algDetermineChunking->initialize();
+
+  // declare properties
+  copyProperty(algLoadEventNexus, "Filename");
+  copyProperty(algLoadEventNexus, "OutputWorkspace");
+  copyProperty(algDetermineChunking, "MaxChunkSize");
+
+  copyProperty(algLoadEventNexus, "FilterByTofMin");
+  copyProperty(algLoadEventNexus, "FilterByTofMax");
+  copyProperty(algLoadEventNexus, "FilterByTimeStart");
+  copyProperty(algLoadEventNexus, "FilterByTimeStop");
+
+  std::string grp1 = "Filter Events";
+  setPropertyGroup("FilterByTofMin", grp1);
+  setPropertyGroup("FilterByTofMax", grp1);
+  setPropertyGroup("FilterByTimeStart", grp1);
+  setPropertyGroup("FilterByTimeStop", grp1);
+
+  copyProperty(algLoadEventNexus, "NXentryName");
+  copyProperty(algLoadEventNexus, "LoadMonitors");
+  copyProperty(algLoadEventNexus, "MonitorsAsEvents");
+  copyProperty(algLoadEventNexus, "FilterMonByTofMin");
+  copyProperty(algLoadEventNexus, "FilterMonByTofMax");
+  copyProperty(algLoadEventNexus, "FilterMonByTimeStart");
+  copyProperty(algLoadEventNexus, "FilterMonByTimeStop");
+
+  setPropertySettings(
+      "MonitorsAsEvents",
+      new VisibleWhenProperty("LoadMonitors", IS_EQUAL_TO, "1"));
+  IPropertySettings *asEventsIsOn =
+      new VisibleWhenProperty("MonitorsAsEvents", IS_EQUAL_TO, "1");
+  setPropertySettings("FilterMonByTofMin", asEventsIsOn);
+  setPropertySettings("FilterMonByTofMax", asEventsIsOn->clone());
+  setPropertySettings("FilterMonByTimeStart", asEventsIsOn->clone());
+  setPropertySettings("FilterMonByTimeStop", asEventsIsOn->clone());
+
+  std::string grp4 = "Monitors";
+  setPropertyGroup("LoadMonitors", grp4);
+  setPropertyGroup("MonitorsAsEvents", grp4);
+  setPropertyGroup("FilterMonByTofMin", grp4);
+  setPropertyGroup("FilterMonByTofMax", grp4);
+  setPropertyGroup("FilterMonByTimeStart", grp4);
+  setPropertyGroup("FilterMonByTimeStop", grp4);
+
+  auto range = boost::make_shared<BoundedValidator<double>>();
+  range->setBounds(0., 100.);
+  declareProperty("FilterBadPulses", 95., range);
+}
+
+/// @see DataProcessorAlgorithm::determineChunk(const std::string &)
+ITableWorkspace_sptr
+LoadEventAndCompress::determineChunk(const std::string &filename) {
+  double maxChunkSize = getProperty("MaxChunkSize");
+
+  auto alg = createChildAlgorithm("DetermineChunking");
+  alg->setProperty("Filename", filename);
+  alg->setProperty("MaxChunkSize", maxChunkSize);
+  alg->executeAsChildAlg();
+  ITableWorkspace_sptr chunkingTable = alg->getProperty("OutputWorkspace");
+
+  if (chunkingTable->rowCount() > 1)
+    g_log.information() << "Will load data in " << chunkingTable->rowCount()
+                        << " chunks\n";
+  else
+    g_log.information("Not chunking");
+
+  return chunkingTable;
+}
+
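+// A sketch of the chunking table loadChunk() consumes below. The column
+// names here are an assumption based on DetermineChunking's usual output for
+// event NeXus files; whatever the columns are, loadChunk() forwards each
+// integer value of the requested row to the LoadEventNexus property of the
+// same name:
+//
+//   ChunkNumber | TotalChunks
+//   ------------+------------
+//        1      |      3
+//        2      |      3
+//        3      |      3
+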
+/// @see DataProcessorAlgorithm::loadChunk(const size_t)
+MatrixWorkspace_sptr LoadEventAndCompress::loadChunk(const size_t rowIndex) {
+  g_log.debug() << "loadChunk(" << rowIndex << ")\n";
+
+  double rowCount = static_cast<double>(m_chunkingTable->rowCount());
+  double progStart = static_cast<double>(rowIndex) / rowCount;
+  double progStop = static_cast<double>(rowIndex + 1) / rowCount;
+
+  auto alg = createChildAlgorithm("LoadEventNexus", progStart, progStop, true);
+  alg->setProperty<string>("Filename", getProperty("Filename"));
+  alg->setProperty<double>("FilterByTofMin", getProperty("FilterByTofMin"));
+  alg->setProperty<double>("FilterByTofMax", getProperty("FilterByTofMax"));
+  alg->setProperty<double>("FilterByTimeStart",
+                           getProperty("FilterByTimeStart"));
+  alg->setProperty<double>("FilterByTimeStop", getProperty("FilterByTimeStop"));
+
+  alg->setProperty<string>("NXentryName", getProperty("NXentryName"));
+  alg->setProperty<bool>("LoadMonitors", getProperty("LoadMonitors"));
+  alg->setProperty<bool>("MonitorsAsEvents", getProperty("MonitorsAsEvents"));
+  alg->setProperty<double>("FilterMonByTofMin",
+                           getProperty("FilterMonByTofMin"));
+  alg->setProperty<double>("FilterMonByTofMax",
+                           getProperty("FilterMonByTofMax"));
+  alg->setProperty<double>("FilterMonByTimeStart",
+                           getProperty("FilterMonByTimeStart"));
+  alg->setProperty<double>("FilterMonByTimeStop",
+                           getProperty("FilterMonByTimeStop"));
+
+  // set chunking information
+  if (rowCount > 0.) {
+    const std::vector<string> COL_NAMES = m_chunkingTable->getColumnNames();
+    for (auto name = COL_NAMES.begin(); name != COL_NAMES.end(); ++name) {
+      alg->setProperty(*name, m_chunkingTable->getRef<int>(*name, rowIndex));
+    }
+  }
+
+  alg->executeAsChildAlg();
+  Workspace_sptr wksp = alg->getProperty("OutputWorkspace");
+  return boost::dynamic_pointer_cast<MatrixWorkspace>(wksp);
+}
+
+/**
+ * Process a chunk in-place
+ *
+ * @param wksp The event workspace to filter and compress in place
+ */
+void LoadEventAndCompress::processChunk(API::MatrixWorkspace_sptr wksp) {
+  EventWorkspace_sptr eventWS =
+      boost::dynamic_pointer_cast<EventWorkspace>(wksp);
+
+  if (m_filterBadPulses > 0.) {
+    auto filterBadPulses = createChildAlgorithm("FilterBadPulses");
+    filterBadPulses->setProperty("InputWorkspace", eventWS);
+    filterBadPulses->setProperty("OutputWorkspace", eventWS);
+    filterBadPulses->setProperty("LowerCutoff", m_filterBadPulses);
+    filterBadPulses->executeAsChildAlg();
+  }
+
+  auto compressEvents = createChildAlgorithm("CompressEvents");
+  compressEvents->setProperty("InputWorkspace", eventWS);
+  compressEvents->setProperty("OutputWorkspace", eventWS);
+  compressEvents->executeAsChildAlg();
+}
+
+//----------------------------------------------------------------------------------------------
+/** Execute the algorithm.
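+ *
+ * The first chunk is loaded directly and becomes the accumulator; each
+ * remaining chunk is loaded, filtered and compressed by processChunk, then
+ * summed into the result with Plus (ClearRHSWorkspace releases each chunk
+ * once it has been added).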
+ */
+void LoadEventAndCompress::exec() {
+  m_filename = getPropertyValue("Filename");
+  m_filterBadPulses = getProperty("FilterBadPulses");
+
+  m_chunkingTable = determineChunk(m_filename);
+
+  // first run is free
+  EventWorkspace_sptr resultWS =
+      boost::dynamic_pointer_cast<EventWorkspace>(loadChunk(0));
+  processChunk(resultWS);
+
+  // load the other chunks
+  const size_t numRows = m_chunkingTable->rowCount();
+  for (size_t i = 1; i < numRows; ++i) {
+    MatrixWorkspace_sptr temp = loadChunk(i);
+    processChunk(temp);
+    auto alg = createChildAlgorithm("Plus");
+    alg->setProperty("LHSWorkspace", resultWS);
+    alg->setProperty("RHSWorkspace", temp);
+    alg->setProperty("OutputWorkspace", resultWS);
+    alg->setProperty("ClearRHSWorkspace", true);
+    alg->executeAsChildAlg();
+  }
+
+  // Don't bother compressing the combined workspace. DetermineChunking is
+  // designed to prefer loading full banks, so no further savings should be
+  // available.
+
+  setProperty("OutputWorkspace", resultWS);
+}
+
+} // namespace WorkflowAlgorithms
+} // namespace Mantid
diff --git a/Code/Mantid/Framework/WorkflowAlgorithms/test/LoadEventAndCompressTest.h b/Code/Mantid/Framework/WorkflowAlgorithms/test/LoadEventAndCompressTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..a1fc310b9320d0b32d18df80f14eed60b1d7f28d
--- /dev/null
+++ b/Code/Mantid/Framework/WorkflowAlgorithms/test/LoadEventAndCompressTest.h
@@ -0,0 +1,96 @@
+#ifndef MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESSTEST_H_
+#define MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESSTEST_H_
+
+#include <cxxtest/TestSuite.h>
+
+#include "MantidAPI/FrameworkManager.h"
+#include "MantidDataObjects/EventWorkspace.h"
+#include "MantidWorkflowAlgorithms/LoadEventAndCompress.h"
+
+using Mantid::WorkflowAlgorithms::LoadEventAndCompress;
+using namespace Mantid::DataObjects;
+using namespace Mantid::API;
+
+class LoadEventAndCompressTest : public CxxTest::TestSuite {
+public:
+  // This pair of boilerplate methods prevent the suite being created statically
+  // This means the constructor isn't called when running other tests
+  static LoadEventAndCompressTest *createSuite() {
+    return new LoadEventAndCompressTest();
+  }
+  static void destroySuite(LoadEventAndCompressTest *suite) { delete suite; }
+
+  void test_Init() {
+    LoadEventAndCompress alg;
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT(alg.isInitialized());
+  }
+
+  void test_exec() {
+    const std::string FILENAME("ARCS_sim_event.nxs");
+
+    // run without chunks
+    const std::string WS_NAME_NO_CHUNKS("LoadEventAndCompress_no_chunks");
+    LoadEventAndCompress algWithoutChunks;
+    TS_ASSERT_THROWS_NOTHING(algWithoutChunks.initialize());
+    TS_ASSERT(algWithoutChunks.isInitialized());
+    TS_ASSERT_THROWS_NOTHING(
+        algWithoutChunks.setPropertyValue("Filename", FILENAME));
+    TS_ASSERT_THROWS_NOTHING(algWithoutChunks.setPropertyValue(
+        "OutputWorkspace", WS_NAME_NO_CHUNKS));
+    TS_ASSERT_THROWS_NOTHING(algWithoutChunks.execute(););
+    TS_ASSERT(algWithoutChunks.isExecuted());
+
+    // Retrieve the workspace from data service
+    EventWorkspace_sptr wsNoChunks;
+    TS_ASSERT_THROWS_NOTHING(
+        wsNoChunks = AnalysisDataService::Instance().retrieveWS<EventWorkspace>(
+            WS_NAME_NO_CHUNKS));
+    TS_ASSERT(wsNoChunks);
+    if (!wsNoChunks)
+      return;
+    TS_ASSERT_EQUALS(wsNoChunks->getEventType(), EventType::WEIGHTED_NOTIME);
+
+    // run with chunks
+    const std::string WS_NAME_CHUNKS("LoadEventAndCompress_chunks");
+    LoadEventAndCompress algWithChunks;
+    TS_ASSERT_THROWS_NOTHING(algWithChunks.initialize());
+    TS_ASSERT(algWithChunks.isInitialized());
+    TS_ASSERT_THROWS_NOTHING(
+        algWithChunks.setPropertyValue("Filename", FILENAME));
+    TS_ASSERT_THROWS_NOTHING(
+        algWithChunks.setPropertyValue("OutputWorkspace", WS_NAME_CHUNKS));
+    TS_ASSERT_THROWS_NOTHING(
+        algWithChunks.setProperty("MaxChunkSize", .005)); // REALLY small file
+    TS_ASSERT_THROWS_NOTHING(algWithChunks.execute(););
+    TS_ASSERT(algWithChunks.isExecuted());
+
+    // Retrieve the workspace from data service
+    EventWorkspace_sptr wsWithChunks;
+    TS_ASSERT_THROWS_NOTHING(
+        wsWithChunks =
+            AnalysisDataService::Instance().retrieveWS<EventWorkspace>(
+                WS_NAME_CHUNKS));
+    TS_ASSERT(wsWithChunks);
+    if (!wsWithChunks)
+      return;
+    TS_ASSERT_EQUALS(wsWithChunks->getEventType(), EventType::WEIGHTED_NOTIME);
+
+    // compare the two workspaces
+    TS_ASSERT_EQUALS(wsWithChunks->getNumberEvents(),
+                     wsNoChunks->getNumberEvents());
+    auto checkAlg =
+        FrameworkManager::Instance().createAlgorithm("CheckWorkspacesMatch");
+    checkAlg->setPropertyValue("Workspace1", WS_NAME_NO_CHUNKS);
+    checkAlg->setPropertyValue("Workspace2", WS_NAME_CHUNKS);
+    checkAlg->execute();
+    TS_ASSERT_EQUALS(checkAlg->getPropertyValue("Result"), "Success!");
+
+    // Remove workspace from the data service.
+    AnalysisDataService::Instance().remove(WS_NAME_NO_CHUNKS);
+    AnalysisDataService::Instance().remove(WS_NAME_CHUNKS);
+  }
+};
+
+#endif /* MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESSTEST_H_ */
diff --git a/Code/Mantid/docs/source/algorithms/LoadEventAndCompress-v1.rst b/Code/Mantid/docs/source/algorithms/LoadEventAndCompress-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..904f90502164953738e1a6948991f082077e233c
--- /dev/null
+++ b/Code/Mantid/docs/source/algorithms/LoadEventAndCompress-v1.rst
@@ -0,0 +1,46 @@
+
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This is a workflow algorithm that loads an event NeXus file in chunks
+and compresses the resulting chunks before summing them. It uses the
+algorithms:
+
+#. :ref:`algm-DetermineChunking`
+#. :ref:`algm-LoadEventNexus`
+#. :ref:`algm-FilterBadPulses`
+#. :ref:`algm-CompressEvents`
+#. :ref:`algm-Plus` to accumulate
+
+
+Workflow
+########
+
+.. diagram:: LoadEventAndCompress-v1_wkflw.dot
+
+
+Usage
+-----
+**Example - LoadEventAndCompress**
+
+The file needed for this example is not present in our standard usage data
+download due to its size. It can, however, be downloaded using this link:
+`PG3_9830_event.nxs <https://github.com/mantidproject/systemtests/blob/master/Data/PG3_9830_event.nxs?raw=true>`_.
+
+
+.. code-block:: python
+
+    PG3_9830_event = LoadEventAndCompress(Filename='PG3_9830_event.nxs',
+                                          MaxChunkSize=1.)
+
+.. categories::
+
+.. sourcelink::
diff --git a/Code/Mantid/docs/source/diagrams/LoadEventAndCompress-v1_wkflw.dot b/Code/Mantid/docs/source/diagrams/LoadEventAndCompress-v1_wkflw.dot
new file mode 100644
index 0000000000000000000000000000000000000000..83d40ad30b86907827ab4dac682369b6672a8950
--- /dev/null
+++ b/Code/Mantid/docs/source/diagrams/LoadEventAndCompress-v1_wkflw.dot
@@ -0,0 +1,36 @@
+digraph LoadEventAndCompress {
+  label="LoadEventAndCompress Flowchart"
+  $global_style
+
+  subgraph params {
+    $param_style
+    file1 [label="Filename"]
+    file2 [label="Filename"]
+    OutputWorkspace
+    MaxChunkSize
+    FilterBadPulses
+  }
+
+  subgraph algorithms {
+    $algorithm_style
+    loadEventNexus [label="LoadEventNexus v1"]
+    compressEvents [label="CompressEvents v1"]
+    determineChunking [label="DetermineChunking v1"]
+    filterBadPulses [label="FilterBadPulses v1"]
+    plus [label="Plus v1"]
+  }
+
+  file1 -> determineChunking
+  MaxChunkSize -> determineChunking
+  file2 -> loadEventNexus
+  determineChunking -> loadEventNexus [label="loop over chunks"]
+
+  loadEventNexus -> filterBadPulses
+  FilterBadPulses -> filterBadPulses
+  filterBadPulses -> compressEvents
+
+  compressEvents -> plus
+  plus -> loadEventNexus [label="accumulate"]
+  plus -> OutputWorkspace
+
+}
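A slightly fuller usage sketch than the documentation example above; the
property values are illustrative only (MaxChunkSize is in Gbytes, and
FilterBadPulses is the percentage cutoff declared in init(), defaulting
to 95):

.. code-block:: python

    # Assumes PG3_9830_event.nxs has been downloaded as described above.
    ws = LoadEventAndCompress(Filename='PG3_9830_event.nxs',
                              MaxChunkSize=1.,
                              FilterBadPulses=95.)
    # The chunked loads are summed into a single event workspace, so the
    # event count matches that of an unchunked load of the same file.
    print(ws.getNumberEvents())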