Skip to content
Snippets Groups Projects
Commit 674c5e02 authored by Lynch, Vickie's avatar Lynch, Vickie
Browse files

Merge pull request #13342 from mantidproject/load_and_compress

LoadEventAndCompress
parents 66bceb82 58b12ead
No related branches found
No related tags found
No related merge requests found
Showing
with 501 additions and 7 deletions
...@@ -56,8 +56,9 @@ protected: ...@@ -56,8 +56,9 @@ protected:
void setAccumAlg(const std::string &alg); void setAccumAlg(const std::string &alg);
void setPropManagerPropName(const std::string &propName); void setPropManagerPropName(const std::string &propName);
void mapPropertyName(const std::string &nameInProp, const std::string &nameInPropManager); void mapPropertyName(const std::string &nameInProp, const std::string &nameInPropManager);
ITableWorkspace_sptr determineChunk(); void copyProperty(API::Algorithm_sptr alg, const std::string& name);
void loadChunk(); virtual ITableWorkspace_sptr determineChunk(const std::string &filename);
virtual MatrixWorkspace_sptr loadChunk(const size_t rowIndex);
Workspace_sptr load(const std::string &inputData, Workspace_sptr load(const std::string &inputData,
const bool loadQuiet = false); const bool loadQuiet = false);
std::vector<std::string> splitInput(const std::string &input); std::vector<std::string> splitInput(const std::string &input);
......
...@@ -109,6 +109,27 @@ void DataProcessorAlgorithm::mapPropertyName(const std::string &nameInProp, ...@@ -109,6 +109,27 @@ void DataProcessorAlgorithm::mapPropertyName(const std::string &nameInProp,
m_nameToPMName[nameInProp] = nameInPropManager; m_nameToPMName[nameInProp] = nameInPropManager;
} }
/**
* Copy a property from an existing algorithm.
*
 * @warning This only works if your algorithm is in the WorkflowAlgorithms sub-project.
*
 * @param alg The initialized algorithm to copy the property declaration from
 * @param name The name of the property to copy
*
* @throws std::runtime_error If you ask to copy a non-existent property
*/
void DataProcessorAlgorithm::copyProperty(API::Algorithm_sptr alg, const std::string& name) {
  // Refuse to copy a property the source algorithm does not declare.
  if (!alg->existsProperty(name)) {
    std::stringstream msg;
    msg << "Algorithm \"" << alg->name() << "\" does not have property \"" << name << "\"";
    throw std::runtime_error(msg.str());
  }

  // Clone the property so this algorithm owns an independent copy, and keep
  // the source algorithm's documentation string for it.
  auto *sourceProp = alg->getPointerToProperty(name);
  declareProperty(sourceProp->clone(), sourceProp->documentation());
}
/** /**
* Get the property held by this object. If the value is the default see if it * Get the property held by this object. If the value is the default see if it
* contained in the PropertyManager. @see Algorithm::getPropertyValue(const string &) * contained in the PropertyManager. @see Algorithm::getPropertyValue(const string &)
...@@ -161,12 +182,15 @@ PropertyManagerOwner::TypedValue DataProcessorAlgorithm::getProperty(const std:: ...@@ -161,12 +182,15 @@ PropertyManagerOwner::TypedValue DataProcessorAlgorithm::getProperty(const std::
return Algorithm::getProperty(name); return Algorithm::getProperty(name);
} }
ITableWorkspace_sptr DataProcessorAlgorithm::determineChunk() { ITableWorkspace_sptr DataProcessorAlgorithm::determineChunk(const std::string &filename) {
UNUSED_ARG(filename);
throw std::runtime_error( throw std::runtime_error(
"DataProcessorAlgorithm::determineChunk is not implemented"); "DataProcessorAlgorithm::determineChunk is not implemented");
} }
void DataProcessorAlgorithm::loadChunk() { MatrixWorkspace_sptr DataProcessorAlgorithm::loadChunk(const size_t rowIndex) {
UNUSED_ARG(rowIndex);
throw std::runtime_error( throw std::runtime_error(
"DataProcessorAlgorithm::loadChunk is not implemented"); "DataProcessorAlgorithm::loadChunk is not implemented");
......
...@@ -62,11 +62,11 @@ public: ...@@ -62,11 +62,11 @@ public:
void setAccumAlgProxy(const std::string &alg) { this->setAccumAlg(alg); } void setAccumAlgProxy(const std::string &alg) { this->setAccumAlg(alg); }
API::ITableWorkspace_sptr determineChunkProxy() { API::ITableWorkspace_sptr determineChunkProxy(const std::string &filename) {
return this->determineChunk(); return this->determineChunk(filename);
} }
void loadChunkProxy() { this->loadChunk(); } void loadChunkProxy(const size_t rowIndex) { this->loadChunk(rowIndex); }
API::Workspace_sptr loadProxy(const std::string &inputData, API::Workspace_sptr loadProxy(const std::string &inputData,
const bool loadQuiet = false) { const bool loadQuiet = false) {
......
...@@ -18,6 +18,7 @@ set ( SRC_FILES ...@@ -18,6 +18,7 @@ set ( SRC_FILES
src/HFIRInstrument.cpp src/HFIRInstrument.cpp
src/HFIRLoad.cpp src/HFIRLoad.cpp
src/HFIRSANSNormalise.cpp src/HFIRSANSNormalise.cpp
src/LoadEventAndCompress.cpp
src/MuonCalculateAsymmetry.cpp src/MuonCalculateAsymmetry.cpp
src/MuonLoad.cpp src/MuonLoad.cpp
src/RefReduction.cpp src/RefReduction.cpp
...@@ -57,6 +58,7 @@ set ( INC_FILES ...@@ -57,6 +58,7 @@ set ( INC_FILES
inc/MantidWorkflowAlgorithms/HFIRInstrument.h inc/MantidWorkflowAlgorithms/HFIRInstrument.h
inc/MantidWorkflowAlgorithms/HFIRLoad.h inc/MantidWorkflowAlgorithms/HFIRLoad.h
inc/MantidWorkflowAlgorithms/HFIRSANSNormalise.h inc/MantidWorkflowAlgorithms/HFIRSANSNormalise.h
inc/MantidWorkflowAlgorithms/LoadEventAndCompress.h
inc/MantidWorkflowAlgorithms/MuonCalculateAsymmetry.h inc/MantidWorkflowAlgorithms/MuonCalculateAsymmetry.h
inc/MantidWorkflowAlgorithms/MuonLoad.h inc/MantidWorkflowAlgorithms/MuonLoad.h
inc/MantidWorkflowAlgorithms/RefReduction.h inc/MantidWorkflowAlgorithms/RefReduction.h
...@@ -74,6 +76,7 @@ set ( INC_FILES ...@@ -74,6 +76,7 @@ set ( INC_FILES
) )
set ( TEST_FILES set ( TEST_FILES
LoadEventAndCompressTest.h
MuonCalculateAsymmetryTest.h MuonCalculateAsymmetryTest.h
MuonLoadTest.h MuonLoadTest.h
SANSSolidAngleCorrectionTest.h SANSSolidAngleCorrectionTest.h
......
#ifndef MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESS_H_
#define MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESS_H_
#include "MantidKernel/System.h"
#include "MantidAPI/DataProcessorAlgorithm.h"
#include "MantidAPI/ITableWorkspace_fwd.h"
namespace Mantid {
namespace WorkflowAlgorithms {
/** LoadEventAndCompress : workflow algorithm that loads an event NeXus file in
    chunks and compresses each chunk before summing them into the output.
Copyright &copy; 2015 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
National Laboratory & European Spallation Source
This file is part of Mantid.
Mantid is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Mantid is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
File change history is stored at: <https://github.com/mantidproject/mantid>
Code Documentation is available at: <http://doxygen.mantidproject.org>
*/
class DLLExport LoadEventAndCompress : public API::DataProcessorAlgorithm {
public:
  LoadEventAndCompress();
  virtual ~LoadEventAndCompress();

  virtual const std::string name() const;
  virtual int version() const;
  virtual const std::string category() const;
  virtual const std::string summary() const;

protected:
  /// Run DetermineChunking on the file; overrides DataProcessorAlgorithm.
  API::ITableWorkspace_sptr determineChunk(const std::string &filename);
  /// Load one chunk (one row of the chunking table) via LoadEventNexus.
  API::MatrixWorkspace_sptr loadChunk(const size_t rowIndex);
  /// Filter bad pulses and compress the events of a chunk, in-place.
  void processChunk(API::MatrixWorkspace_sptr wksp);

private:
  void init();
  void exec();

  std::string m_filename;      ///< cached value of the Filename property
  double m_filterBadPulses;    ///< FilterBadPulses cutoff; <= 0 disables filtering
  API::ITableWorkspace_sptr m_chunkingTable; ///< result of determineChunk()
};
} // namespace WorkflowAlgorithms
} // namespace Mantid
#endif /* MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESS_H_ */
#include "MantidWorkflowAlgorithms/LoadEventAndCompress.h"
#include "MantidAPI/AlgorithmManager.h"
#include "MantidAPI/FileProperty.h"
#include "MantidAPI/FrameworkManager.h"
#include "MantidAPI/ITableWorkspace.h"
#include "MantidDataObjects/EventWorkspace.h"
#include "MantidKernel/ArrayProperty.h"
#include "MantidKernel/BoundedValidator.h"
#include "MantidKernel/VisibleWhenProperty.h"
namespace Mantid {
namespace WorkflowAlgorithms {
using std::size_t;
using std::string;
using namespace Kernel;
using namespace API;
using namespace DataObjects;
// Register the algorithm into the AlgorithmFactory
DECLARE_ALGORITHM(LoadEventAndCompress)
//----------------------------------------------------------------------------------------------
/** Constructor
*/
// Start with the bad-pulse cutoff marked as unset (EMPTY_DBL()); exec()
// overwrites it with the FilterBadPulses property value.
LoadEventAndCompress::LoadEventAndCompress() : m_filterBadPulses(EMPTY_DBL()) {}
//----------------------------------------------------------------------------------------------
/** Destructor
*/
LoadEventAndCompress::~LoadEventAndCompress() {} // members clean themselves up
//----------------------------------------------------------------------------------------------
/// Algorithms name for identification. @see Algorithm::name
const string LoadEventAndCompress::name() const {
  return "LoadEventAndCompress";
}
/// Algorithm's version for identification. @see Algorithm::version
int LoadEventAndCompress::version() const {
  return 1;
}
/// Algorithm's category for identification. @see Algorithm::category
const string LoadEventAndCompress::category() const { return "Workflow\\DataHandling"; }
/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary
const string LoadEventAndCompress::summary() const { return "Load an event workspace by chunks and compress"; }
//----------------------------------------------------------------------------------------------
/** Initialize the algorithm's properties.
*/
void LoadEventAndCompress::init() {
  // Unmanaged instances of the child algorithms whose property declarations
  // are cloned below; initialize() is needed to make their properties exist.
  auto algLoadEventNexus = AlgorithmManager::Instance().createUnmanaged("LoadEventNexus");
  algLoadEventNexus->initialize();
  auto algDetermineChunking =
      AlgorithmManager::Instance().createUnmanaged("DetermineChunking");
  algDetermineChunking->initialize();

  // declare properties by cloning them from the child algorithms so the
  // declarations stay in sync with LoadEventNexus / DetermineChunking
  copyProperty(algLoadEventNexus, "Filename");
  copyProperty(algLoadEventNexus, "OutputWorkspace");
  copyProperty(algDetermineChunking, "MaxChunkSize");
  copyProperty(algLoadEventNexus, "FilterByTofMin");
  copyProperty(algLoadEventNexus, "FilterByTofMax");
  copyProperty(algLoadEventNexus, "FilterByTimeStart");
  copyProperty(algLoadEventNexus, "FilterByTimeStop");

  // group the event-filtering options together in the GUI
  std::string grp1 = "Filter Events";
  setPropertyGroup("FilterByTofMin", grp1);
  setPropertyGroup("FilterByTofMax", grp1);
  setPropertyGroup("FilterByTimeStart", grp1);
  setPropertyGroup("FilterByTimeStop", grp1);

  copyProperty(algLoadEventNexus, "NXentryName");
  copyProperty(algLoadEventNexus, "LoadMonitors");
  copyProperty(algLoadEventNexus, "MonitorsAsEvents");
  copyProperty(algLoadEventNexus, "FilterMonByTofMin");
  copyProperty(algLoadEventNexus, "FilterMonByTofMax");
  copyProperty(algLoadEventNexus, "FilterMonByTimeStart");
  copyProperty(algLoadEventNexus, "FilterMonByTimeStop");

  // Monitor filtering only makes sense when monitors are loaded as events,
  // so hide the dependent options otherwise.
  setPropertySettings(
      "MonitorsAsEvents",
      new VisibleWhenProperty("LoadMonitors", IS_EQUAL_TO, "1"));
  IPropertySettings *asEventsIsOn =
      new VisibleWhenProperty("MonitorsAsEvents", IS_EQUAL_TO, "1");
  setPropertySettings("FilterMonByTofMin", asEventsIsOn);
  setPropertySettings("FilterMonByTofMax", asEventsIsOn->clone());
  setPropertySettings("FilterMonByTimeStart", asEventsIsOn->clone());
  setPropertySettings("FilterMonByTimeStop", asEventsIsOn->clone());

  // group the monitor options together in the GUI
  std::string grp4 = "Monitors";
  setPropertyGroup("LoadMonitors", grp4);
  setPropertyGroup("MonitorsAsEvents", grp4);
  setPropertyGroup("FilterMonByTofMin", grp4);
  setPropertyGroup("FilterMonByTofMax", grp4);
  setPropertyGroup("FilterMonByTimeStart", grp4);
  setPropertyGroup("FilterMonByTimeStop", grp4);

  // FilterBadPulses cutoff as a percentage, restricted to [0, 100]
  auto range = boost::make_shared<BoundedValidator<double>>();
  range->setBounds(0., 100.);
  declareProperty("FilterBadPulses", 95., range);
}
/// @see DataProcessorAlgorithm::determineChunk(const std::string &)
ITableWorkspace_sptr
LoadEventAndCompress::determineChunk(const std::string &filename) {
  // Delegate the chunking decision entirely to DetermineChunking.
  const double maxChunkSize = getProperty("MaxChunkSize");

  auto determineChunking = createChildAlgorithm("DetermineChunking");
  determineChunking->setProperty("Filename", filename);
  determineChunking->setProperty("MaxChunkSize", maxChunkSize);
  determineChunking->executeAsChildAlg();

  ITableWorkspace_sptr chunking = determineChunking->getProperty("OutputWorkspace");

  // Tell the user whether the load will happen in pieces.
  const size_t numChunks = chunking->rowCount();
  if (numChunks > 1)
    g_log.information() << "Will load data in " << numChunks << " chunks\n";
  else
    g_log.information("Not chunking");

  return chunking;
}
/// @see DataProcessorAlgorithm::loadChunk(const size_t)
/**
 * Load a single chunk of the file with LoadEventNexus.
 *
 * @param rowIndex Row of the chunking table (from determineChunk) describing
 *                 which chunk to load.
 * @return The loaded chunk as a MatrixWorkspace (null if the child algorithm
 *         produced a workspace of a different type).
 */
MatrixWorkspace_sptr LoadEventAndCompress::loadChunk(const size_t rowIndex) {
  g_log.debug() << "loadChunk(" << rowIndex << ")\n";

  // Map this chunk onto its slice of the overall progress bar.
  const double rowCount = static_cast<double>(m_chunkingTable->rowCount());
  const double progStart = static_cast<double>(rowIndex) / rowCount;
  const double progStop = static_cast<double>(rowIndex + 1) / rowCount;

  auto alg = createChildAlgorithm("LoadEventNexus", progStart, progStop, true);
  alg->setProperty<string>("Filename", getProperty("Filename"));
  alg->setProperty<double>("FilterByTofMin", getProperty("FilterByTofMin"));
  alg->setProperty<double>("FilterByTofMax", getProperty("FilterByTofMax"));
  alg->setProperty<double>("FilterByTimeStart",
                           getProperty("FilterByTimeStart"));
  alg->setProperty<double>("FilterByTimeStop", getProperty("FilterByTimeStop"));

  alg->setProperty<string>("NXentryName", getProperty("NXentryName"));
  alg->setProperty<bool>("LoadMonitors", getProperty("LoadMonitors"));
  alg->setProperty<bool>("MonitorsAsEvents", getProperty("MonitorsAsEvents"));
  alg->setProperty<double>("FilterMonByTofMin",
                           getProperty("FilterMonByTofMin"));
  alg->setProperty<double>("FilterMonByTofMax",
                           getProperty("FilterMonByTofMax"));
  alg->setProperty<double>("FilterMonByTimeStart",
                           getProperty("FilterMonByTimeStart"));
  alg->setProperty<double>("FilterMonByTimeStop",
                           getProperty("FilterMonByTimeStop"));

  // Forward every column of the chunking table (e.g. chunk number/total) to
  // LoadEventNexus so it loads only this chunk's share of the file.
  if (rowCount > 0.) {
    const std::vector<string> COL_NAMES = m_chunkingTable->getColumnNames();
    for (const auto &colName : COL_NAMES) {
      alg->setProperty(colName, m_chunkingTable->getRef<int>(colName, rowIndex));
    }
  }

  alg->executeAsChildAlg();
  Workspace_sptr wksp = alg->getProperty("OutputWorkspace");
  return boost::dynamic_pointer_cast<MatrixWorkspace>(wksp);
}
/**
 * Process a chunk in-place: optionally run FilterBadPulses, then always run
 * CompressEvents, both writing back into the same workspace.
 *
 * @param wksp Event workspace to process; modified in-place.
 */
void LoadEventAndCompress::processChunk(API::MatrixWorkspace_sptr wksp) {
  // NOTE(review): the cast result is not checked - a non-event workspace
  // would yield a null eventWS here; confirm loadChunk always returns an
  // EventWorkspace.
  EventWorkspace_sptr eventWS =
      boost::dynamic_pointer_cast<EventWorkspace>(wksp);

  // Skip pulse filtering when the cutoff is zero or negative.
  if (m_filterBadPulses > 0.) {
    auto filterBadPulses = createChildAlgorithm("FilterBadPulses");
    filterBadPulses->setProperty("InputWorkspace", eventWS);
    filterBadPulses->setProperty("OutputWorkspace", eventWS);
    filterBadPulses->setProperty("LowerCutoff", m_filterBadPulses);
    filterBadPulses->executeAsChildAlg();
  }

  auto compressEvents = createChildAlgorithm("CompressEvents");
  compressEvents->setProperty("InputWorkspace", eventWS);
  compressEvents->setProperty("OutputWorkspace", eventWS);
  compressEvents->executeAsChildAlg();
}
//----------------------------------------------------------------------------------------------
/** Execute the algorithm.
*/
void LoadEventAndCompress::exec() {
  // Cache the property values used by the chunk-processing helpers.
  m_filename = getPropertyValue("Filename");
  m_filterBadPulses = getProperty("FilterBadPulses");

  m_chunkingTable = determineChunk(m_filename);

  // first run is free - the first chunk becomes the accumulator workspace
  EventWorkspace_sptr resultWS =
      boost::dynamic_pointer_cast<EventWorkspace>(loadChunk(0));
  processChunk(resultWS);

  // load the other chunks and sum each into the accumulator with Plus
  const size_t numRows = m_chunkingTable->rowCount();
  for (size_t i = 1; i < numRows; ++i) {
    MatrixWorkspace_sptr temp = loadChunk(i);
    processChunk(temp);
    auto alg = createChildAlgorithm("Plus");
    alg->setProperty("LHSWorkspace", resultWS);
    alg->setProperty("RHSWorkspace", temp);
    alg->setProperty("OutputWorkspace", resultWS);
    // ClearRHSWorkspace presumably releases the chunk's events after
    // summing to keep peak memory low - confirm against Plus docs.
    alg->setProperty("ClearRHSWorkspace", true);
    alg->executeAsChildAlg();
  }

  // Don't bother compressing combined workspace. DetermineChunking is designed
  // to prefer loading full banks so no further savings should be available.
  setProperty("OutputWorkspace", resultWS);
}
} // namespace WorkflowAlgorithms
} // namespace Mantid
#ifndef MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESSTEST_H_
#define MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESSTEST_H_
#include <cxxtest/TestSuite.h>
#include "MantidAPI/FrameworkManager.h"
#include "MantidDataObjects/EventWorkspace.h"
#include "MantidWorkflowAlgorithms/LoadEventAndCompress.h"
using Mantid::WorkflowAlgorithms::LoadEventAndCompress;
using namespace Mantid::DataObjects;
using namespace Mantid::API;
class LoadEventAndCompressTest : public CxxTest::TestSuite {
public:
  // This pair of boilerplate methods prevent the suite being created statically
  // This means the constructor isn't called when running other tests
  static LoadEventAndCompressTest *createSuite() {
    return new LoadEventAndCompressTest();
  }
  static void destroySuite(LoadEventAndCompressTest *suite) { delete suite; }

  /// The algorithm initializes without throwing and reports success.
  void test_Init() {
    LoadEventAndCompress alg;
    TS_ASSERT_THROWS_NOTHING(alg.initialize());
    TS_ASSERT(alg.isInitialized());
  }

  /// Load the same file with and without chunking; the results must match.
  void test_exec() {
    const std::string FILENAME("ARCS_sim_event.nxs");

    // run without chunks
    const std::string WS_NAME_NO_CHUNKS("LoadEventAndCompress_no_chunks");
    LoadEventAndCompress algWithoutChunks;
    TS_ASSERT_THROWS_NOTHING(algWithoutChunks.initialize());
    TS_ASSERT(algWithoutChunks.isInitialized());
    TS_ASSERT_THROWS_NOTHING(
        algWithoutChunks.setPropertyValue("Filename", FILENAME));
    TS_ASSERT_THROWS_NOTHING(algWithoutChunks.setPropertyValue(
        "OutputWorkspace", WS_NAME_NO_CHUNKS));
    TS_ASSERT_THROWS_NOTHING(algWithoutChunks.execute(););
    TS_ASSERT(algWithoutChunks.isExecuted());

    // Retrieve the workspace from data service
    EventWorkspace_sptr wsNoChunks;
    TS_ASSERT_THROWS_NOTHING(
        wsNoChunks = AnalysisDataService::Instance().retrieveWS<EventWorkspace>(
            WS_NAME_NO_CHUNKS));
    TS_ASSERT(wsNoChunks);
    if (!wsNoChunks)
      return;
    // CompressEvents output is expected to be weighted, no-time events
    TS_ASSERT_EQUALS(wsNoChunks->getEventType(), EventType::WEIGHTED_NOTIME);

    // run with chunks (tiny MaxChunkSize forces multiple chunks)
    const std::string WS_NAME_CHUNKS("LoadEventAndCompress_chunks");
    LoadEventAndCompress algWithChunks;
    TS_ASSERT_THROWS_NOTHING(algWithChunks.initialize());
    TS_ASSERT(algWithChunks.isInitialized());
    TS_ASSERT_THROWS_NOTHING(
        algWithChunks.setPropertyValue("Filename", FILENAME));
    TS_ASSERT_THROWS_NOTHING(
        algWithChunks.setPropertyValue("OutputWorkspace", WS_NAME_CHUNKS));
    TS_ASSERT_THROWS_NOTHING(
        algWithChunks.setProperty("MaxChunkSize", .005)); // REALLY small file
    TS_ASSERT_THROWS_NOTHING(algWithChunks.execute(););
    TS_ASSERT(algWithChunks.isExecuted());

    // Retrieve the workspace from data service
    EventWorkspace_sptr wsWithChunks;
    TS_ASSERT_THROWS_NOTHING(
        wsWithChunks =
            AnalysisDataService::Instance().retrieveWS<EventWorkspace>(
                WS_NAME_CHUNKS));
    TS_ASSERT(wsWithChunks);
    if (!wsWithChunks)
      return;
    TS_ASSERT_EQUALS(wsWithChunks->getEventType(), EventType::WEIGHTED_NOTIME);

    // compare the two workspaces
    TS_ASSERT_EQUALS(wsWithChunks->getNumberEvents(),
                     wsNoChunks->getNumberEvents());
    auto checkAlg =
        FrameworkManager::Instance().createAlgorithm("CheckWorkspacesMatch");
    checkAlg->setPropertyValue("Workspace1", WS_NAME_NO_CHUNKS);
    checkAlg->setPropertyValue("Workspace2", WS_NAME_CHUNKS);
    checkAlg->execute();
    TS_ASSERT_EQUALS(checkAlg->getPropertyValue("Result"), "Success!");

    // Remove workspace from the data service.
    AnalysisDataService::Instance().remove(WS_NAME_NO_CHUNKS);
    AnalysisDataService::Instance().remove(WS_NAME_CHUNKS);
  }
};
#endif /* MANTID_WORKFLOWALGORITHMS_LOADEVENTANDCOMPRESSTEST_H_ */
.. algorithm::
.. summary::
.. alias::
.. properties::
Description
-----------
This is a workflow algorithm that loads an event nexus file in chunks
and compresses the resulting chunks before summing them. It uses the
algorithms:
#. :ref:`algm-DetermineChunking`
#. :ref:`algm-LoadEventNexus`
#. :ref:`algm-FilterBadPulses`
#. :ref:`algm-CompressEvents`
#. :ref:`algm-Plus` to accumulate
Workflow
########
.. diagram:: LoadEventAndCompress-v1_wkflw.dot
Usage
-----
**Example - LoadEventAndCompress**
The files needed for this example are not present in our standard usage data
download due to their size. They can however be downloaded using these links:
`PG3_9830_event.nxs <https://github.com/mantidproject/systemtests/blob/master/Data/PG3_9830_event.nxs?raw=true>`_.
.. code-block:: python
PG3_9830_event = LoadEventAndCompress(Filename='PG3_9830_event.nxs',
MaxChunkSize=1.)
.. categories::
.. sourcelink::
digraph LoadEventAndCompress {
label="LoadEventAndCompress Flowchart"
$global_style
subgraph params {
$param_style
file1 [label="Filename"]
file2 [label="Filename"]
OutputWorkspace
MaxChunkSize
FilterBadPulses
}
subgraph algorithms {
$algorithm_style
loadEventNexus [label="LoadEventNexus v1"]
compressEvents [label="CompressEvents v1"]
determineChunking [label="DetermineChunking v1"]
filterBadPulses [label="FilterBadPulses v1"]
plus [label="Plus v1"]
}
file1 -> determineChunking
MaxChunkSize -> determineChunking
file2 -> loadEventNexus
determineChunking -> loadEventNexus [label="loop over chunks"]
loadEventNexus -> filterBadPulses
FilterBadPulses -> filterBadPulses
filterBadPulses -> compressEvents
compressEvents -> plus
plus -> loadEventNexus [label="accumulate"]
plus -> OutputWorkspace
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment