diff --git a/Framework/API/CMakeLists.txt b/Framework/API/CMakeLists.txt index a717a8b0f1c3b2bc549fc91bc6b380f8ef179ce8..2a5ad82cd64387d67ea777a541e737921aa28529 100644 --- a/Framework/API/CMakeLists.txt +++ b/Framework/API/CMakeLists.txt @@ -112,6 +112,7 @@ set ( SRC_FILES src/ParamFunction.cpp src/ParameterReference.cpp src/ParameterTie.cpp + src/ParallelAlgorithm.cpp src/PeakFunctionIntegrator.cpp src/Progress.cpp src/Projection.cpp @@ -307,6 +308,7 @@ set ( INC_FILES inc/MantidAPI/ParamFunction.h inc/MantidAPI/ParameterReference.h inc/MantidAPI/ParameterTie.h + inc/MantidAPI/ParallelAlgorithm.h inc/MantidAPI/PeakFunctionIntegrator.h inc/MantidAPI/Progress.h inc/MantidAPI/Projection.h diff --git a/Framework/API/inc/MantidAPI/Algorithm.h b/Framework/API/inc/MantidAPI/Algorithm.h index c3402241030192dbc29710df444af9770a6b9667..99b06db1bcda1eaaf9407db85902ac1a803e3cbd 100644 --- a/Framework/API/inc/MantidAPI/Algorithm.h +++ b/Framework/API/inc/MantidAPI/Algorithm.h @@ -10,13 +10,11 @@ // -- These headers will (most-likely) be used by every inheriting algorithm #include "MantidAPI/AlgorithmFactory.h" //for the factory macro -#include "MantidAPI/IndexTypeProperty.h" #include "MantidAPI/Progress.h" #include "MantidAPI/WorkspaceOpOverloads.h" #include "MantidAPI/WorkspaceProperty.h" #include "MantidKernel/EmptyValues.h" #include "MantidKernel/MultiThreaded.h" -#include <MantidIndexing/SpectrumIndexSet.h> #include "MantidParallel/ExecutionMode.h" #include "MantidParallel/StorageMode.h" @@ -37,6 +35,9 @@ class Value; } namespace Mantid { +namespace Indexing { +class SpectrumIndexSet; +} namespace Parallel { class Communicator; } @@ -201,7 +202,7 @@ public: std::is_convertible<T1 *, MatrixWorkspace *>::value>::type, typename = typename std::enable_if< std::is_convertible<T2 *, std::string *>::value || - std::is_convertible<T2 *, std::vector<int> *>::value>::type> + std::is_convertible<T2 *, std::vector<int64_t> *>::value>::type> void setWorkspaceInputProperties(const 
std::string &name, const boost::shared_ptr<T1> &wksp, IndexType type, const T2 &list); @@ -211,7 +212,7 @@ public: std::is_convertible<T1 *, MatrixWorkspace *>::value>::type, typename = typename std::enable_if< std::is_convertible<T2 *, std::string *>::value || - std::is_convertible<T2 *, std::vector<int> *>::value>::type> + std::is_convertible<T2 *, std::vector<int64_t> *>::value>::type> void setWorkspaceInputProperties(const std::string &name, const std::string &wsName, IndexType type, const T2 &list); @@ -291,6 +292,10 @@ public: const std::string &name, const double startProgress = -1., const double endProgress = -1., const bool enableLogging = true, const int &version = -1); + void setupAsChildAlgorithm(boost::shared_ptr<Algorithm> algorithm, + const double startProgress = -1., + const double endProgress = -1., + const bool enableLogging = true); /// set whether we wish to track the child algorithm's history and pass it the /// parent object to fill. @@ -403,13 +408,13 @@ protected: /// versions bool m_usingBaseProcessGroups = false; - template <typename T, typename = typename std::enable_if<std::is_convertible< - T *, MatrixWorkspace *>::value>::type> - void declareWorkspaceInputProperties( - const std::string &propertyName, - const int allowedIndexTypes = IndexType::WorkspaceIndex, - PropertyMode::Type optional = PropertyMode::Type::Mandatory, - LockMode::Type lock = LockMode::Type::Lock, const std::string &doc = ""); + template <typename T, const int AllowedIndexTypes = IndexType::WorkspaceIndex, + typename... WSPropArgs, + typename = typename std::enable_if< + std::is_convertible<T *, MatrixWorkspace *>::value>::type> + void declareWorkspaceInputProperties(const std::string &propertyName, + const std::string &doc, + WSPropArgs &&... 
wsPropArgs); private: template <typename T1, typename T2, typename WsType> diff --git a/Framework/API/inc/MantidAPI/Algorithm.tcc b/Framework/API/inc/MantidAPI/Algorithm.tcc index e27d1538ca352652e6fe4db7de1f3eeda3dcbfb7..b5f1f07fb1d89152bf7348dd2b67d11a4f26f271 100644 --- a/Framework/API/inc/MantidAPI/Algorithm.tcc +++ b/Framework/API/inc/MantidAPI/Algorithm.tcc @@ -1,3 +1,6 @@ +#ifndef MANTID_API_ALGORITHM_TCC_ +#define MANTID_API_ALGORITHM_TCC_ + #include "MantidAPI/Algorithm.h" #include "MantidAPI/IndexProperty.h" #include "MantidAPI/WorkspaceProperty.h" @@ -25,34 +28,40 @@ namespace API { @param propertyName Name of property which will be reserved @param allowedIndexTypes combination of allowed index types. Default IndexType::WorkspaceIndex -@param optional Determines if workspace property is optional. Default -PropertyMode::Type::Mandatory -@param lock Determines whether or not the workspace is locked. Default -LockMode::Type::Lock +@param wsPropArgs a parameter pack of arguments forwarded to WorkspaceProperty. +Can contain PropertyMode, LockMode, and validators. @param doc Property documentation string. */ -template <typename T, typename> +template <typename T, const int AllowedIndexTypes, typename... WSPropArgs, + typename> void Algorithm::declareWorkspaceInputProperties(const std::string &propertyName, - const int allowedIndexTypes, - PropertyMode::Type optional, - LockMode::Type lock, - const std::string &doc) { + const std::string &doc, + WSPropArgs &&... 
wsPropArgs) { auto wsProp = Kernel::make_unique<WorkspaceProperty<T>>( - propertyName, "", Kernel::Direction::Input, optional, lock); + propertyName, "", Kernel::Direction::Input, + std::forward<WSPropArgs>(wsPropArgs)...); const auto &wsPropRef = *wsProp; declareProperty(std::move(wsProp), doc); auto indexTypePropName = IndexTypeProperty::generatePropertyName(propertyName); auto indexTypeProp = Kernel::make_unique<IndexTypeProperty>( - indexTypePropName, allowedIndexTypes); + indexTypePropName, AllowedIndexTypes); const auto &indexTypePropRef = *indexTypeProp; - declareProperty(std::move(indexTypeProp)); + declareProperty(std::move(indexTypeProp), + "The type of indices in the optional index set; For optimal " + "performance WorkspaceIndex should be preferred;"); auto indexPropName = IndexProperty::generatePropertyName(propertyName); declareProperty(Kernel::make_unique<IndexProperty>(indexPropName, wsPropRef, - indexTypePropRef)); + indexTypePropRef), + "An optional set of spectra that will be processed by the " + "algorithm; If not set, all spectra will be processed; The " + "indices in this list can be workspace indices or possibly " + "spectrum numbers, depending on the selection made for the " + "index type; Indices are entered as a comma-separated list " + "of values, and/or ranges; For example, '4,6,10-20,1000';"); m_reservedList.push_back(propertyName); m_reservedList.push_back(indexTypePropName); @@ -101,7 +110,7 @@ void Algorithm::setWorkspaceInputProperties(const std::string &name, /** Mechanism for setting the index property with a workspace shared pointer. 
* This method can only be used if T1 is convertible to a MatrixWorkspace and -* T2 is either std::string or std::vector<int> +* T2 is either std::string or std::vector<int64_t> @param name Property name @param wsName Workspace name as string @@ -146,4 +155,6 @@ Algorithm::getWorkspaceAndIndices(const std::string &name) const { return std::make_tuple(ws, indexSet); } } // namespace API -} // namespace Mantid \ No newline at end of file +} // namespace Mantid + +#endif /*MANTID_API_ALGORITHM_TCC_*/ diff --git a/Framework/API/inc/MantidAPI/ISpectrum.h b/Framework/API/inc/MantidAPI/ISpectrum.h index 764002a85bf313193cb2882b254572171b4fb867..25137e84324db06c3bcea266a156acf55266527f 100644 --- a/Framework/API/inc/MantidAPI/ISpectrum.h +++ b/Framework/API/inc/MantidAPI/ISpectrum.h @@ -8,7 +8,12 @@ #include <set> +class SpectrumTester; namespace Mantid { +namespace DataObjects { +class Histogram1D; +class EventList; +} namespace API { class MatrixWorkspace; @@ -54,6 +59,9 @@ public: void copyInfoFrom(const ISpectrum &other); + /// Copy data from another ISpectrum with double-dynamic dispatch. 
+ virtual void copyDataFrom(const ISpectrum &source) = 0; + virtual void setX(const Kernel::cow_ptr<HistogramData::HistogramX> &X) = 0; virtual MantidVec &dataX() = 0; virtual const MantidVec &dataX() const = 0; @@ -242,6 +250,10 @@ public: void setMatrixWorkspace(MatrixWorkspace *matrixWorkspace, const size_t index); + virtual void copyDataInto(DataObjects::EventList &) const; + virtual void copyDataInto(DataObjects::Histogram1D &) const; + virtual void copyDataInto(SpectrumTester &) const; + protected: virtual void checkAndSanitizeHistogram(HistogramData::Histogram &){}; virtual void checkWorksWithPoints() const {} diff --git a/Framework/API/inc/MantidAPI/IndexProperty.h b/Framework/API/inc/MantidAPI/IndexProperty.h index acddee7702027616b5e1644ef6bb181a66e0dfe1..1952a88d54c173ceae0e07af0c54bd02043681ce 100644 --- a/Framework/API/inc/MantidAPI/IndexProperty.h +++ b/Framework/API/inc/MantidAPI/IndexProperty.h @@ -8,6 +8,9 @@ #include "MantidKernel/ArrayProperty.h" namespace Mantid { +namespace Indexing { +class IndexInfo; +} namespace API { /** IndexProperty : Implementation of a property type which returns a @@ -40,7 +43,7 @@ namespace API { File change history is stored at: <https://github.com/mantidproject/mantid> Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class MANTID_API_DLL IndexProperty : public Kernel::ArrayProperty<int> { +class MANTID_API_DLL IndexProperty : public Kernel::ArrayProperty<int64_t> { public: IndexProperty(const std::string &name, const IWorkspaceProperty &workspaceProp, @@ -50,17 +53,20 @@ public: IndexProperty *clone() const override; - using Kernel::ArrayProperty<int>::operator=; + using Kernel::ArrayProperty<int64_t>::operator=; bool isDefault() const override; std::string isValid() const override; std::string operator=(const std::string &rhs); operator Indexing::SpectrumIndexSet() const; Indexing::SpectrumIndexSet getIndices() const; + Indexing::IndexInfo getFilteredIndexInfo() const; static std::string 
generatePropertyName(const std::string &name = ""); private: + const Indexing::IndexInfo &getIndexInfoFromWorkspace() const; + const IWorkspaceProperty &m_workspaceProp; const IndexTypeProperty &m_indexTypeProp; mutable Indexing::SpectrumIndexSet m_indices; @@ -71,4 +77,4 @@ private: } // namespace API } // namespace Mantid -#endif /* MANTID_API_INDEXPROPERTY_H_ */ \ No newline at end of file +#endif /* MANTID_API_INDEXPROPERTY_H_ */ diff --git a/Framework/API/inc/MantidAPI/MatrixWorkspace.h b/Framework/API/inc/MantidAPI/MatrixWorkspace.h index 45ed1ea4688dbb825f2cdff168379f008e4a7599..32dba628494e8065b97bd633bc3b34eb4f3f80d8 100644 --- a/Framework/API/inc/MantidAPI/MatrixWorkspace.h +++ b/Framework/API/inc/MantidAPI/MatrixWorkspace.h @@ -451,6 +451,7 @@ public: /// index, weight> typedef std::map<size_t, double> MaskList; const MaskList &maskedBins(const size_t &workspaceIndex) const; + void setMaskedBins(const size_t workspaceIndex, const MaskList &maskedBins); // Methods handling the internal monitor workspace virtual void diff --git a/Framework/API/inc/MantidAPI/ParallelAlgorithm.h b/Framework/API/inc/MantidAPI/ParallelAlgorithm.h new file mode 100644 index 0000000000000000000000000000000000000000..98664a2e189358ee5252255e9c945a063f85fd2c --- /dev/null +++ b/Framework/API/inc/MantidAPI/ParallelAlgorithm.h @@ -0,0 +1,61 @@ +#ifndef MANTID_API_PARALLELALGORITHM_H_ +#define MANTID_API_PARALLELALGORITHM_H_ + +#include "MantidAPI/Algorithm.h" +#include "MantidAPI/DllConfig.h" + +namespace Mantid { +namespace API { + +/** Base class for algorithms that treat all spectra independently, i.e., we can + trivially parallelize over the spectra without changes. The assumption is that + we have one input and one output workspace. The storage mode is just + propagated from input to output. 
When a specific algorithm is determined to be + trivially parallel (this is a manual process), the only required change to add + MPI support is to inherit from this class instead of Algorithm. Inheriting + from ParallelAlgorithm instead of from Algorithm provides the necessary + overridden method(s) to allow running an algorithm with MPI. This works under + the following conditions: + 1. The algorithm must have a single input and a single output workspace. + 2. No output files may be written since filenames would clash. + Algorithms that do not modify spectra in a workspace may also use this base + class to support MPI. For example, modifications of the instrument are handled + in an identical manner on all MPI ranks, without requiring changes to the + algorithm, other than setting the correct execution mode via the overloads + provided by ParallelAlgorithm. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_API_DLL ParallelAlgorithm : public Algorithm { +protected: + Parallel::ExecutionMode getParallelExecutionMode( + const std::map<std::string, Parallel::StorageMode> &storageModes) + const override; +}; + +} // namespace API +} // namespace Mantid + +#endif /* MANTID_API_PARALLELALGORITHM_H_ */ diff --git a/Framework/API/src/Algorithm.cpp b/Framework/API/src/Algorithm.cpp index 5204ce09f55aee54497d96c57e999b6416985a7e..fe785b499ef4b21cb3d5c446bdd4ae2cb5dd96ad 100644 --- a/Framework/API/src/Algorithm.cpp +++ b/Framework/API/src/Algorithm.cpp @@ -743,6 +743,20 @@ Algorithm_sptr Algorithm::createChildAlgorithm(const std::string &name, const int &version) { Algorithm_sptr alg = AlgorithmManager::Instance().createUnmanaged(name, version); + setupAsChildAlgorithm(alg, startProgress, endProgress, enableLogging); + return alg; +} + +/** Setup algorithm as child algorithm. + * + * Used internally by createChildAlgorithm. Arguments are as documented there. + * Can also be used manually for algorithms created otherwise. This allows + * running algorithms that are not declared into the factory as child + * algorithms. 
*/ +void Algorithm::setupAsChildAlgorithm(Algorithm_sptr alg, + const double startProgress, + const double endProgress, + const bool enableLogging) { // set as a child alg->setChild(true); alg->setLogging(enableLogging); @@ -751,8 +765,8 @@ Algorithm_sptr Algorithm::createChildAlgorithm(const std::string &name, try { alg->initialize(); } catch (std::runtime_error &) { - throw std::runtime_error("Unable to initialise Child Algorithm '" + name + - "'"); + throw std::runtime_error("Unable to initialise Child Algorithm '" + + alg->name() + "'"); } // If output workspaces are nameless, give them a temporary name to satisfy @@ -783,8 +797,6 @@ Algorithm_sptr Algorithm::createChildAlgorithm(const std::string &name, PARALLEL_CRITICAL(Algorithm_StoreWeakPtr) { m_ChildAlgorithms.push_back(weakPtr); } - - return alg; } //============================================================================================= diff --git a/Framework/API/src/ISpectrum.cpp b/Framework/API/src/ISpectrum.cpp index 3bdb384dee1c4953fc39519dfa6dedbcde64bdf6..5006b7be2b72ab6b4903d5d146b27b4f932952db 100644 --- a/Framework/API/src/ISpectrum.cpp +++ b/Framework/API/src/ISpectrum.cpp @@ -199,5 +199,19 @@ void ISpectrum::invalidateSpectrumDefinition() const { m_matrixWorkspace->invalidateSpectrumDefinition(m_index); } +/// Override in child classes for polymorphic copying of data. +void ISpectrum::copyDataInto(DataObjects::EventList &) const { + throw std::runtime_error("Incompatible types in ISpectrum::copyDataFrom"); +} +/// Override in child classes for polymorphic copying of data. +void ISpectrum::copyDataInto(DataObjects::Histogram1D &) const { + throw std::runtime_error("Incompatible types in ISpectrum::copyDataFrom"); +} + +/// Override in child classes for polymorphic copying of data. 
+void ISpectrum::copyDataInto(SpectrumTester &) const { + throw std::runtime_error("Incompatible types in ISpectrum::copyDataFrom"); +} + } // namespace Mantid } // namespace API diff --git a/Framework/API/src/IndexProperty.cpp b/Framework/API/src/IndexProperty.cpp index 3d8c808025a15ab0fcbabdd5427ac9076e197fb9..fa8dedb614158378416296d0df4446720019eea6 100644 --- a/Framework/API/src/IndexProperty.cpp +++ b/Framework/API/src/IndexProperty.cpp @@ -42,25 +42,15 @@ IndexProperty::operator Indexing::SpectrumIndexSet() const { } Indexing::SpectrumIndexSet IndexProperty::getIndices() const { - MatrixWorkspace_sptr wksp = boost::dynamic_pointer_cast<MatrixWorkspace>( - m_workspaceProp.getWorkspace()); - if (!wksp) - throw std::runtime_error("Invalid workspace type provided to " - "IndexProperty. Must be convertible to " - "MatrixWorkspace."); - - const auto &indexInfo = wksp->indexInfo(); + const auto &indexInfo = getIndexInfoFromWorkspace(); auto type = m_indexTypeProp.selectedType(); if (m_value.empty()) { return indexInfo.makeIndexSet(); } else { - auto res = std::minmax_element(m_value.cbegin(), m_value.cend()); - auto min = *res.first; - auto max = *res.second; - + auto min = m_value.front(); + auto max = m_value.back(); auto isRange = (max - min) == static_cast<int>(m_value.size() - 1); - if (isRange) { switch (type) { case IndexType::WorkspaceIndex: @@ -69,8 +59,8 @@ Indexing::SpectrumIndexSet IndexProperty::getIndices() const { static_cast<Indexing::GlobalSpectrumIndex>(max)); case IndexType::SpectrumNum: return indexInfo.makeIndexSet( - static_cast<Indexing::SpectrumNumber>(min), - static_cast<Indexing::SpectrumNumber>(max)); + static_cast<Indexing::SpectrumNumber>(static_cast<int32_t>(min)), + static_cast<Indexing::SpectrumNumber>(static_cast<int32_t>(max))); } } else { switch (type) { @@ -78,9 +68,13 @@ Indexing::SpectrumIndexSet IndexProperty::getIndices() const { return indexInfo.makeIndexSet( std::vector<Indexing::GlobalSpectrumIndex>(m_value.begin(), 
m_value.end())); - case IndexType::SpectrumNum: - return indexInfo.makeIndexSet(std::vector<Indexing::SpectrumNumber>( - m_value.begin(), m_value.end())); + case IndexType::SpectrumNum: { + std::vector<Indexing::SpectrumNumber> spectrumNumbers; + for (const auto index : m_value) + spectrumNumbers.push_back(static_cast<Indexing::SpectrumNumber>( + static_cast<int32_t>(index))); + return indexInfo.makeIndexSet(spectrumNumbers); + } } } } @@ -89,8 +83,47 @@ Indexing::SpectrumIndexSet IndexProperty::getIndices() const { return m_indices; } +/** Return IndexInfo created from workspace but containing selected spectra. + * + * The selected spectra are the same as in the SpectrumIndexSet returned by this + * property and the order is guaranteed to be consistent. That is, if the Nth + * entry in the SpectrumIndexSet is M, the spectrum with index M in the input + * workspace is equal to the spectrum with index N in the returned IndexInfo. */ +Indexing::IndexInfo IndexProperty::getFilteredIndexInfo() const { + const auto &indexInfo = getIndexInfoFromWorkspace(); + if (m_value.empty()) + return indexInfo; + switch (m_indexTypeProp.selectedType()) { + case IndexType::WorkspaceIndex: + return {std::vector<Indexing::GlobalSpectrumIndex>(m_value.begin(), + m_value.end()), + indexInfo}; + case IndexType::SpectrumNum: { + std::vector<Indexing::SpectrumNumber> spectrumNumbers; + for (const auto index : m_value) + spectrumNumbers.push_back( + static_cast<Indexing::SpectrumNumber>(static_cast<int32_t>(index))); + return {spectrumNumbers, indexInfo}; + } + default: + throw std::runtime_error( + "IndexProperty::getFilteredIndexInfo -- unsupported index type"); + } +} + std::string IndexProperty::generatePropertyName(const std::string &name) { return name + "IndexSet"; } + +const Indexing::IndexInfo &IndexProperty::getIndexInfoFromWorkspace() const { + auto wksp = boost::dynamic_pointer_cast<MatrixWorkspace>( + m_workspaceProp.getWorkspace()); + if (!wksp) + throw 
std::runtime_error("Invalid workspace type provided to " + "IndexProperty. Must be convertible to " + "MatrixWorkspace."); + return wksp->indexInfo(); +} + } // namespace API -} // namespace Mantid \ No newline at end of file +} // namespace Mantid diff --git a/Framework/API/src/MatrixWorkspace.cpp b/Framework/API/src/MatrixWorkspace.cpp index 1bb2efdd5b6ab82b3d403a30af94e88027544713..f957db8ec78ca57ea6dea03e9d71b8611cb55301 100644 --- a/Framework/API/src/MatrixWorkspace.cpp +++ b/Framework/API/src/MatrixWorkspace.cpp @@ -1,5 +1,5 @@ #include "MantidAPI/MatrixWorkspace.h" -#include "MantidAPI/Algorithm.tcc" +#include "MantidAPI/Algorithm.h" #include "MantidAPI/BinEdgeAxis.h" #include "MantidAPI/MatrixWorkspaceMDIterator.h" #include "MantidAPI/NumericAxis.h" @@ -581,6 +581,11 @@ MatrixWorkspace::getIndexFromSpectrumNumber(const specnum_t specNo) const { */ std::vector<size_t> MatrixWorkspace::getIndicesFromDetectorIDs( const std::vector<detid_t> &detIdList) const { + if (m_indexInfo->size() != m_indexInfo->globalSize()) + throw std::runtime_error("MatrixWorkspace: Using getIndicesFromDetectorIDs " + "in a parallel run is most likely incorrect. " + "Aborting."); + std::map<detid_t, std::set<size_t>> detectorIDtoWSIndices; for (size_t i = 0; i < getNumberHistograms(); ++i) { auto detIDs = getSpectrum(i).getDetectorIDs(); @@ -1084,6 +1089,16 @@ MatrixWorkspace::maskedBins(const size_t &workspaceIndex) const { return it->second; } +/** Set the list of masked bins for given workspaceIndex. Not thread safe. + * + * No data is masked and previous masking for any bin for this workspace index + * is overridden, so this should only be used for copying flags into a new + * workspace, not for performing masking operations. */ +void MatrixWorkspace::setMaskedBins(const size_t workspaceIndex, + const MaskList &maskedBins) { + m_masks[workspaceIndex] = maskedBins; +} + /** Sets the internal monitor workspace to the provided workspace. 
* This method is intended for use by data-loading algorithms. * Note that no checking is performed as to whether this workspace actually @@ -1896,7 +1911,7 @@ void MatrixWorkspace::setImageE(const MantidImage &image, size_t start, } void MatrixWorkspace::invalidateCachedSpectrumNumbers() { - if (storageMode() == Parallel::StorageMode::Distributed && + if (m_isInitialized && storageMode() == Parallel::StorageMode::Distributed && m_indexInfo->communicator().size() > 1) throw std::logic_error("Setting spectrum numbers in MatrixWorkspace via " "ISpectrum::setSpectrumNo is not possible in MPI " @@ -2009,40 +2024,6 @@ void MatrixWorkspace::rebuildDetectorIDGroupings() { } // namespace API } // Namespace Mantid -// Explicit Instantiations of IndexProperty Methods in Algorithm -namespace Mantid { -namespace API { -template DLLExport void -Algorithm::declareWorkspaceInputProperties<MatrixWorkspace>( - const std::string &propertyName, const int allowedIndexTypes, - PropertyMode::Type optional, LockMode::Type lock, const std::string &doc); - -template DLLExport void -Algorithm::setWorkspaceInputProperties<MatrixWorkspace, std::vector<int>>( - const std::string &name, const MatrixWorkspace_sptr &wksp, IndexType type, - const std::vector<int> &list); - -template DLLExport void -Algorithm::setWorkspaceInputProperties<MatrixWorkspace, std::string>( - const std::string &name, const MatrixWorkspace_sptr &wksp, IndexType type, - const std::string &list); - -template DLLExport void -Algorithm::setWorkspaceInputProperties<MatrixWorkspace, std::vector<int>>( - const std::string &name, const std::string &wsName, IndexType type, - const std::vector<int> &list); - -template DLLExport void -Algorithm::setWorkspaceInputProperties<MatrixWorkspace, std::string>( - const std::string &name, const std::string &wsName, IndexType type, - const std::string &list); - -template DLLExport - std::tuple<boost::shared_ptr<MatrixWorkspace>, Indexing::SpectrumIndexSet> - 
Algorithm::getWorkspaceAndIndices(const std::string &name) const; -} // namespace API -} // namespace Mantid - ///\cond TEMPLATE namespace Mantid { namespace Kernel { diff --git a/Framework/API/src/ParallelAlgorithm.cpp b/Framework/API/src/ParallelAlgorithm.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1538296ca9e4a1a60f0b7fb08f109b914a4261ef --- /dev/null +++ b/Framework/API/src/ParallelAlgorithm.cpp @@ -0,0 +1,12 @@ +#include "MantidAPI/ParallelAlgorithm.h" + +namespace Mantid { +namespace API { + +Parallel::ExecutionMode ParallelAlgorithm::getParallelExecutionMode( + const std::map<std::string, Parallel::StorageMode> &storageModes) const { + return Parallel::getCorrespondingExecutionMode(storageModes.begin()->second); +} + +} // namespace API +} // namespace Mantid diff --git a/Framework/API/src/WorkspaceFactory.cpp b/Framework/API/src/WorkspaceFactory.cpp index 88525dfa5d5b01926b840b8c6e748effe8e890b0..ffea8dc23753712aba98cc4a68887401eb3d8b6b 100644 --- a/Framework/API/src/WorkspaceFactory.cpp +++ b/Framework/API/src/WorkspaceFactory.cpp @@ -105,8 +105,10 @@ void WorkspaceFactoryImpl::initializeFromParent( // Same number of histograms = copy over the spectra data if (parent.getNumberHistograms() == child.getNumberHistograms()) { + child.m_isInitialized = false; for (size_t i = 0; i < parent.getNumberHistograms(); ++i) child.getSpectrum(i).copyInfoFrom(parent.getSpectrum(i)); + child.m_isInitialized = true; // We use this variant without ISpectrum update to avoid costly rebuilds // triggered by setIndexInfo(). 
ISpectrum::copyInfoFrom sets invalid flags // for spectrum definitions, so it is important to call this *afterwards*, diff --git a/Framework/API/test/AlgorithmTest.h b/Framework/API/test/AlgorithmTest.h index 188e11df23d7d487d35eb02dd020afe58965050f..92f3c17252ceb9c6a6e124cc28390e644b356b16 100644 --- a/Framework/API/test/AlgorithmTest.h +++ b/Framework/API/test/AlgorithmTest.h @@ -4,9 +4,10 @@ #include <cxxtest/TestSuite.h> #include "FakeAlgorithms.h" -#include "MantidAPI/Algorithm.h" +#include "MantidAPI/Algorithm.tcc" #include "MantidAPI/AlgorithmFactory.h" #include "MantidAPI/FrameworkManager.h" +#include "MantidAPI/HistogramValidator.h" #include "MantidAPI/WorkspaceFactory.h" #include "MantidAPI/WorkspaceGroup.h" #include "MantidAPI/WorkspaceProperty.h" @@ -171,10 +172,16 @@ public: static const std::string FAIL_MSG; void init() override { - declareWorkspaceInputProperties<MatrixWorkspace>("InputWorkspace"); + declareWorkspaceInputProperties<MatrixWorkspace>("InputWorkspace", ""); declareProperty( Mantid::Kernel::make_unique<WorkspaceProperty<MatrixWorkspace>>( "InputWorkspace2", "", Mantid::Kernel::Direction::Input)); + declareWorkspaceInputProperties< + MatrixWorkspace, IndexType::SpectrumNum | IndexType::WorkspaceIndex>( + "InputWorkspace3", ""); + declareWorkspaceInputProperties< + MatrixWorkspace, IndexType::SpectrumNum | IndexType::WorkspaceIndex>( + "InputWorkspace4", "", boost::make_shared<HistogramValidator>()); } void exec() override {} @@ -829,10 +836,9 @@ public: WorkspaceFactory::Instance().create("WorkspaceTester", 10, 10, 9); IndexingAlgorithm indexAlg; indexAlg.init(); - TS_ASSERT_THROWS_NOTHING(( - indexAlg.setWorkspaceInputProperties<MatrixWorkspace, std::vector<int>>( - "InputWorkspace", wksp, IndexType::WorkspaceIndex, - std::vector<int>{1, 2, 3, 4, 5}))); + TS_ASSERT_THROWS_NOTHING((indexAlg.setWorkspaceInputProperties( + "InputWorkspace", wksp, IndexType::WorkspaceIndex, + std::vector<int64_t>{1, 2, 3, 4, 5}))); } void @@ -854,10 +860,10 
@@ public: IndexingAlgorithm indexAlg; indexAlg.init(); // Requires workspace in ADS due to validity checks - TS_ASSERT_THROWS_NOTHING(( - indexAlg.setWorkspaceInputProperties<MatrixWorkspace, std::vector<int>>( + TS_ASSERT_THROWS_NOTHING( + (indexAlg.setWorkspaceInputProperties<MatrixWorkspace>( "InputWorkspace", "wksp", IndexType::WorkspaceIndex, - std::vector<int>{1, 2, 3, 4, 5}))); + std::vector<int64_t>{1, 2, 3, 4, 5}))); AnalysisDataService::Instance().remove("wksp"); } diff --git a/Framework/API/test/IndexPropertyTest.h b/Framework/API/test/IndexPropertyTest.h index e802b61aae122aa661995d8931f125301b6f0c49..aab2ffb9ea173969e159b80652cf740ebe3ff023 100644 --- a/Framework/API/test/IndexPropertyTest.h +++ b/Framework/API/test/IndexPropertyTest.h @@ -7,6 +7,7 @@ #include "MantidAPI/WorkspaceProperty.h" #include "MantidKernel/PropertyManager.h" #include "MantidKernel/make_unique.h" +#include "MantidIndexing/IndexInfo.h" #include "MantidTestHelpers/FakeObjects.h" #include <boost/shared_ptr.hpp> #include <cxxtest/TestSuite.h> @@ -60,7 +61,7 @@ public: auto indexSet = indexProp.getIndices(); TS_ASSERT_EQUALS(indexSet.size(), 6); - std::vector<int> testVec{0, 1, 2, 3, 4, 7}; + std::vector<int64_t> testVec{0, 1, 2, 3, 4, 7}; for (size_t i = 0; i < indexSet.size(); i++) TS_ASSERT_EQUALS(indexSet[i], testVec[i]); @@ -77,7 +78,7 @@ public: auto indexSet = indexProp.getIndices(); TS_ASSERT_EQUALS(indexSet.size(), 6); - std::vector<int> testVec{0, 1, 2, 3, 4, 5}; + std::vector<int64_t> testVec{0, 1, 2, 3, 4, 5}; for (size_t i = 0; i < indexSet.size(); i++) TS_ASSERT_EQUALS(indexSet[i], testVec[i]); @@ -89,7 +90,7 @@ public: IndexTypeProperty itypeProp("IndexType", IndexType::SpectrumNum); IndexProperty indexProp("IndexSet", m_wkspProp, itypeProp); - std::vector<int> input{1, 3, 5, 7}; + std::vector<int64_t> input{1, 3, 5, 7}; indexProp = input; auto indexSet = indexProp.getIndices(); @@ -100,6 +101,22 @@ public: TS_ASSERT_EQUALS(indexSet[i], input[i] - 1); } + void 
testIndexOrderOfFullRangePreserved() { + auto ws = WorkspaceFactory::Instance().create("WorkspaceTester", 3, 1, 1); + m_wkspProp = ws; + IndexTypeProperty itypeProp("IndexType", IndexType::WorkspaceIndex); + IndexProperty indexProp("IndexSet", m_wkspProp, itypeProp); + std::vector<int64_t> input{0, 2, 1}; + indexProp = input; + + auto indexSet = indexProp.getIndices(); + + TS_ASSERT_EQUALS(indexSet.size(), 3); + TS_ASSERT_EQUALS(indexSet[0], 0); + TS_ASSERT_EQUALS(indexSet[1], 2); + TS_ASSERT_EQUALS(indexSet[2], 1); + } + void testInvalidWhenIndicesOutOfRange() { auto ws = WorkspaceFactory::Instance().create("WorkspaceTester", 10, 10, 9); m_wkspProp = ws; @@ -123,7 +140,7 @@ public: auto indices = Mantid::Indexing::SpectrumIndexSet(indexProp); TS_ASSERT(indices.size() == 5); - for (int i = 0; i < 5; i++) + for (int64_t i = 0; i < 5; i++) TS_ASSERT_EQUALS(indices[i], i + 1) } @@ -133,9 +150,43 @@ public: IndexProperty::generatePropertyName(propName)); } + void testGetFilteredIndexInfo_WorkspaceIndex() { + auto ws = WorkspaceFactory::Instance().create("WorkspaceTester", 3, 1, 1); + m_wkspProp = ws; + IndexTypeProperty itypeProp("IndexType", IndexType::WorkspaceIndex); + IndexProperty indexProp("IndexSet", m_wkspProp, itypeProp); + + auto indexInfo = indexProp.getFilteredIndexInfo(); + TS_ASSERT_EQUALS(indexInfo.size(), 3); + + std::vector<int64_t> input{1, 2}; + indexProp = input; + indexInfo = indexProp.getFilteredIndexInfo(); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), 3); + } + + void testGetFilteredIndexInfo_SpectrumNum() { + auto ws = WorkspaceFactory::Instance().create("WorkspaceTester", 3, 1, 1); + m_wkspProp = ws; + IndexTypeProperty itypeProp("IndexType", IndexType::SpectrumNum); + IndexProperty indexProp("IndexSet", m_wkspProp, itypeProp); + + auto indexInfo = indexProp.getFilteredIndexInfo(); + TS_ASSERT_EQUALS(indexInfo.size(), 3); + + std::vector<int64_t> 
input{1, 2}; + indexProp = input; + indexInfo = indexProp.getFilteredIndexInfo(); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), 1); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), 2); + } + private: WorkspaceProperty<MatrixWorkspace> m_wkspProp; IndexTypeProperty m_itypeProp; }; -#endif /* MANTID_API_INDEXPROPERTYTEST_H_ */ \ No newline at end of file +#endif /* MANTID_API_INDEXPROPERTYTEST_H_ */ diff --git a/Framework/API/test/MatrixWorkspaceTest.h b/Framework/API/test/MatrixWorkspaceTest.h index 623f5f33f160d612cd8d135bd27ade5a269e969f..6403661dc8269ab77e97510b591b4ba6352a0db2 100644 --- a/Framework/API/test/MatrixWorkspaceTest.h +++ b/Framework/API/test/MatrixWorkspaceTest.h @@ -742,6 +742,16 @@ public: } } + void testSetMaskedBins() { + auto ws = makeWorkspaceWithDetectors(2, 2); + ws->flagMasked(0, 1); + ws->flagMasked(1, 0); + ws->setMaskedBins(1, ws->maskedBins(0)); + TS_ASSERT(ws->hasMaskedBins(1)); + TS_ASSERT_EQUALS(ws->maskedBins(1).size(), 1); + TS_ASSERT_EQUALS(ws->maskedBins(0).begin()->first, 1); + } + void testSize() { WorkspaceTester wkspace; wkspace.initialize(1, 4, 3); diff --git a/Framework/Algorithms/CMakeLists.txt b/Framework/Algorithms/CMakeLists.txt index 8873708619f5de06029ec5444e40efe2a70dad30..9b4830fb17ce9799d1616ed06cf8eeb55f418b21 100644 --- a/Framework/Algorithms/CMakeLists.txt +++ b/Framework/Algorithms/CMakeLists.txt @@ -17,9 +17,9 @@ set ( SRC_FILES src/ApplyTransmissionCorrection.cpp src/AsymmetryCalc.cpp src/AverageLogData.cpp + src/Bin2DPowderDiffraction.cpp src/BinaryOperateMasks.cpp - src/BinaryOperation.cpp - src/Bin2DPowderDiffraction.cpp + src/BinaryOperation.cpp src/CalMuonDeadTime.cpp src/CalMuonDetectorPhases.cpp src/CalculateCountRate.cpp @@ -46,8 +46,8 @@ set ( SRC_FILES src/Comment.cpp src/CommutativeBinaryOperation.cpp src/CompareWorkspaces.cpp - src/ConjoinXRuns.cpp src/ConjoinWorkspaces.cpp + src/ConjoinXRuns.cpp src/ConvertAxesToRealSpace.cpp src/ConvertAxisByFormula.cpp 
src/ConvertDiffCal.cpp @@ -121,6 +121,7 @@ set ( SRC_FILES src/ExtractMaskToTable.cpp src/ExtractSingleSpectrum.cpp src/ExtractSpectra.cpp + src/ExtractSpectra2.cpp src/ExtractUnmaskedSpectra.cpp src/FFT.cpp src/FFTDerivative.cpp @@ -159,6 +160,7 @@ set ( SRC_FILES src/GroupWorkspaces.cpp src/HRPDSlabCanAbsorption.cpp src/He3TubeEfficiency.cpp + src/HyspecScharpfCorrection.cpp src/IQTransform.cpp src/IdentifyNoisyDetectors.cpp src/IntegrateByComponent.cpp @@ -197,11 +199,11 @@ set ( SRC_FILES src/MultiplyRange.cpp src/MuonAsymmetryHelper.cpp src/MuonGroupDetectors.cpp + src/NRCalculateSlitResolution.cpp src/NormaliseByCurrent.cpp src/NormaliseByDetector.cpp src/NormaliseToMonitor.cpp src/NormaliseToUnity.cpp - src/NRCalculateSlitResolution.cpp src/OneMinusExponentialCor.cpp src/PDCalibration.cpp src/PDDetermineCharacterizations.cpp @@ -334,17 +336,17 @@ set ( INC_FILES inc/MantidAlgorithms/AlphaCalc.h inc/MantidAlgorithms/AnnularRingAbsorption.h inc/MantidAlgorithms/AnyShapeAbsorption.h + inc/MantidAlgorithms/ApodizationFunctions.h inc/MantidAlgorithms/AppendSpectra.h - inc/MantidAlgorithms/ApodizationFunctions.h inc/MantidAlgorithms/ApplyCalibration.h inc/MantidAlgorithms/ApplyDeadTimeCorr.h inc/MantidAlgorithms/ApplyDetailedBalance.h inc/MantidAlgorithms/ApplyTransmissionCorrection.h inc/MantidAlgorithms/AsymmetryCalc.h inc/MantidAlgorithms/AverageLogData.h + inc/MantidAlgorithms/Bin2DPowderDiffraction.h inc/MantidAlgorithms/BinaryOperateMasks.h inc/MantidAlgorithms/BinaryOperation.h - inc/MantidAlgorithms/Bin2DPowderDiffraction.h inc/MantidAlgorithms/BoostOptionalToAlgorithmProperty.h inc/MantidAlgorithms/CalMuonDeadTime.h inc/MantidAlgorithms/CalMuonDetectorPhases.h @@ -447,6 +449,7 @@ set ( INC_FILES inc/MantidAlgorithms/ExtractMaskToTable.h inc/MantidAlgorithms/ExtractSingleSpectrum.h inc/MantidAlgorithms/ExtractSpectra.h + inc/MantidAlgorithms/ExtractSpectra2.h inc/MantidAlgorithms/ExtractUnmaskedSpectra.h inc/MantidAlgorithms/FFT.h 
inc/MantidAlgorithms/FFTDerivative.h @@ -486,6 +489,7 @@ set ( INC_FILES inc/MantidAlgorithms/GroupWorkspaces.h inc/MantidAlgorithms/HRPDSlabCanAbsorption.h inc/MantidAlgorithms/He3TubeEfficiency.h + inc/MantidAlgorithms/HyspecScharpfCorrection.h inc/MantidAlgorithms/IQTransform.h inc/MantidAlgorithms/IdentifyNoisyDetectors.h inc/MantidAlgorithms/IntegrateByComponent.h @@ -527,16 +531,16 @@ set ( INC_FILES inc/MantidAlgorithms/MultiplyRange.h inc/MantidAlgorithms/MuonAsymmetryHelper.h inc/MantidAlgorithms/MuonGroupDetectors.h + inc/MantidAlgorithms/NRCalculateSlitResolution.h inc/MantidAlgorithms/NormaliseByCurrent.h inc/MantidAlgorithms/NormaliseByDetector.h inc/MantidAlgorithms/NormaliseToMonitor.h inc/MantidAlgorithms/NormaliseToUnity.h - inc/MantidAlgorithms/NRCalculateSlitResolution.h inc/MantidAlgorithms/OneMinusExponentialCor.h - inc/MantidAlgorithms/PaddingAndApodization.h inc/MantidAlgorithms/PDCalibration.h inc/MantidAlgorithms/PDDetermineCharacterizations.h inc/MantidAlgorithms/PDFFourierTransform.h + inc/MantidAlgorithms/PaddingAndApodization.h inc/MantidAlgorithms/Pause.h inc/MantidAlgorithms/PerformIndexOperations.h inc/MantidAlgorithms/PhaseQuadMuon.h @@ -684,9 +688,9 @@ set ( TEST_FILES ApplyTransmissionCorrectionTest.h AsymmetryCalcTest.h AverageLogDataTest.h + Bin2DPowderDiffractionTest.h BinaryOperateMasksTest.h BinaryOperationTest.h - Bin2DPowderDiffractionTest.h CalMuonDeadTimeTest.h CalMuonDetectorPhasesTest.h CalculateCountRateTest.h @@ -702,8 +706,8 @@ set ( TEST_FILES ChainedOperatorTest.h ChangeBinOffsetTest.h ChangeLogTimeTest.h - ChangePulsetimeTest.h ChangePulsetime2Test.h + ChangePulsetimeTest.h ChangeTimeZeroTest.h CheckWorkspacesMatchTest.h ChopDataTest.h @@ -785,6 +789,7 @@ set ( TEST_FILES ExtractMaskTest.h ExtractMaskToTableTest.h ExtractSingleSpectrumTest.h + ExtractSpectra2Test.h ExtractSpectraTest.h ExtractUnmaskedSpectraTest.h FFTDerivativeTest.h @@ -822,6 +827,7 @@ set ( TEST_FILES GroupWorkspacesTest.h 
HRPDSlabCanAbsorptionTest.h He3TubeEfficiencyTest.h + HyspecScharpfCorrectionTest.h IQTransformTest.h IdentifyNoisyDetectorsTest.h IntegrateByComponentTest.h @@ -860,15 +866,15 @@ set ( TEST_FILES MultiplyRangeTest.h MultiplyTest.h MuonGroupDetectorsTest.h + NRCalculateSlitResolutionTest.h NormaliseByCurrentTest.h NormaliseByDetectorTest.h NormaliseToMonitorTest.h - NRCalculateSlitResolutionTest.h OneMinusExponentialCorTest.h - PaddingAndApodizationTest.h PDCalibrationTest.h PDDetermineCharacterizationsTest.h PDFFourierTransformTest.h + PaddingAndApodizationTest.h PauseTest.h PerformIndexOperationsTest.h PhaseQuadMuonTest.h @@ -933,8 +939,8 @@ set ( TEST_FILES SortEventsTest.h SparseInstrumentTest.h SpatialGroupingTest.h - SpecularReflectionCalculateThetaTest.h SpecularReflectionCalculateTheta2Test.h + SpecularReflectionCalculateThetaTest.h SpecularReflectionPositionCorrect2Test.h SpecularReflectionPositionCorrectTest.h SphericalAbsorptionTest.h diff --git a/Framework/Algorithms/inc/MantidAlgorithms/CropWorkspace.h b/Framework/Algorithms/inc/MantidAlgorithms/CropWorkspace.h index 2861c43ce0dc5426f509ada66e38d8146565c956..23d072c16163a538d982aa05a3af705079b157c9 100644 --- a/Framework/Algorithms/inc/MantidAlgorithms/CropWorkspace.h +++ b/Framework/Algorithms/inc/MantidAlgorithms/CropWorkspace.h @@ -1,10 +1,7 @@ #ifndef MANTID_ALGORITHMS_CROPWORKSPACE_H_ #define MANTID_ALGORITHMS_CROPWORKSPACE_H_ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- -#include "MantidAPI/Algorithm.h" +#include "MantidAPI/ParallelAlgorithm.h" namespace Mantid { namespace Algorithms { @@ -66,7 +63,7 @@ namespace Algorithms { File change history is stored at: <https://github.com/mantidproject/mantid> Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class DLLExport CropWorkspace : public API::Algorithm { +class DLLExport CropWorkspace : public 
API::ParallelAlgorithm { public: /// Algorithm's name const std::string name() const override { return "CropWorkspace"; } diff --git a/Framework/Algorithms/inc/MantidAlgorithms/ExtractSpectra.h b/Framework/Algorithms/inc/MantidAlgorithms/ExtractSpectra.h index eda7d0939fa56808b607aca9dd59aba0359a3232..6f94a82a1991816d6f14723a4f381016d9e35a18 100644 --- a/Framework/Algorithms/inc/MantidAlgorithms/ExtractSpectra.h +++ b/Framework/Algorithms/inc/MantidAlgorithms/ExtractSpectra.h @@ -2,7 +2,7 @@ #define MANTID_ALGORITHMS_EXTRACTSPECTRA_H_ #include "MantidKernel/System.h" -#include "MantidAPI/Algorithm.h" +#include "MantidAPI/ParallelAlgorithm.h" #include "MantidDataObjects/EventWorkspace.h" namespace Mantid { @@ -33,7 +33,7 @@ namespace Algorithms { File change history is stored at: <https://github.com/mantidproject/mantid> Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class DLLExport ExtractSpectra : public API::Algorithm { +class DLLExport ExtractSpectra : public API::ParallelAlgorithm { public: const std::string name() const override; int version() const override; @@ -46,11 +46,11 @@ private: void execHistogram(); void execEvent(); + void propagateBinMasking(API::MatrixWorkspace &workspace, const int i) const; void checkProperties(); std::size_t getXMin(const size_t wsIndex = 0); std::size_t getXMax(const size_t wsIndex = 0); - void cropRagged(API::MatrixWorkspace_sptr outputWorkspace, int inIndex, - int outIndex); + void cropRagged(API::MatrixWorkspace &workspace, int index); /// The input workspace API::MatrixWorkspace_sptr m_inputWorkspace; diff --git a/Framework/Algorithms/inc/MantidAlgorithms/ExtractSpectra2.h b/Framework/Algorithms/inc/MantidAlgorithms/ExtractSpectra2.h new file mode 100644 index 0000000000000000000000000000000000000000..208febc4d661da8d4b63a2c2e9192fe26b4915dd --- /dev/null +++ b/Framework/Algorithms/inc/MantidAlgorithms/ExtractSpectra2.h @@ -0,0 +1,55 @@ +#ifndef MANTID_ALGORITHMS_EXTRACTSPECTRA2_H_ +#define 
MANTID_ALGORITHMS_EXTRACTSPECTRA2_H_ + +#include "MantidAlgorithms/DllConfig.h" +#include "MantidAPI/ParallelAlgorithm.h" + +namespace Mantid { +namespace Algorithms { + +/** Extracts specified spectra from a workspace and places them in a new + workspace. In contrast to ExtractSpectra version 1 this does not support + cropping X at the same time. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_ALGORITHMS_DLL ExtractSpectra2 : public API::ParallelAlgorithm { +public: + const std::string name() const override; + int version() const override; + const std::string category() const override; + const std::string summary() const override; + +private: + void init() override; + void exec() override; + template <class T> + void exec(const T &inputWS, const Indexing::SpectrumIndexSet &indexSet); +}; + +} // namespace Algorithms +} // namespace Mantid + +#endif /* MANTID_ALGORITHMS_EXTRACTSPECTRA2_H_ */ diff --git a/Framework/Algorithms/inc/MantidAlgorithms/FilterBadPulses.h b/Framework/Algorithms/inc/MantidAlgorithms/FilterBadPulses.h index 54d83ef9beab0755f58aa92586f2a055a68177d8..282ba9028e69f22de43dd48e402c6d73dec40456 100644 --- a/Framework/Algorithms/inc/MantidAlgorithms/FilterBadPulses.h +++ b/Framework/Algorithms/inc/MantidAlgorithms/FilterBadPulses.h @@ -1,11 +1,8 @@ #ifndef MANTID_ALGORITHMS_FILTERBADPULSES_H_ #define MANTID_ALGORITHMS_FILTERBADPULSES_H_ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- #include "MantidKernel/System.h" -#include "MantidAPI/Algorithm.h" +#include "MantidAPI/ParallelAlgorithm.h" #include "MantidDataObjects/EventWorkspace.h" namespace Mantid { @@ -49,7 +46,7 @@ namespace Algorithms { File change history is stored at: <https://github.com/mantidproject/mantid>. 
Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class DLLExport FilterBadPulses : public API::Algorithm { +class DLLExport FilterBadPulses : public API::ParallelAlgorithm { public: const std::string name() const override; /// Summary of algorithms purpose diff --git a/Framework/Algorithms/inc/MantidAlgorithms/FilterByLogValue.h b/Framework/Algorithms/inc/MantidAlgorithms/FilterByLogValue.h index 3bd51a1cd9030a62b450f9d7e322a3c825a25f15..e55acb0d7fefeb9b1b51da8010f3b27a80be6661 100644 --- a/Framework/Algorithms/inc/MantidAlgorithms/FilterByLogValue.h +++ b/Framework/Algorithms/inc/MantidAlgorithms/FilterByLogValue.h @@ -1,11 +1,8 @@ #ifndef MANTID_ALGORITHMS_FILTERBYLOGVALUE_H_ #define MANTID_ALGORITHMS_FILTERBYLOGVALUE_H_ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- #include "MantidKernel/System.h" -#include "MantidAPI/Algorithm.h" +#include "MantidAPI/ParallelAlgorithm.h" #include "MantidDataObjects/EventWorkspace.h" namespace Mantid { @@ -33,7 +30,7 @@ namespace Algorithms { File change history is stored at: <https://github.com/mantidproject/mantid>. 
Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class DLLExport FilterByLogValue : public API::Algorithm { +class DLLExport FilterByLogValue : public API::ParallelAlgorithm { public: /// Algorithm's name for identification overriding a virtual method const std::string name() const override { return "FilterByLogValue"; }; diff --git a/Framework/Algorithms/inc/MantidAlgorithms/HyspecScharpfCorrection.h b/Framework/Algorithms/inc/MantidAlgorithms/HyspecScharpfCorrection.h new file mode 100644 index 0000000000000000000000000000000000000000..2d465d3843e0035f318427b3dc97084189f6e0e3 --- /dev/null +++ b/Framework/Algorithms/inc/MantidAlgorithms/HyspecScharpfCorrection.h @@ -0,0 +1,76 @@ +#ifndef MANTID_ALGORITHMS_HYSPECSCHARPFCORRECTION_H_ +#define MANTID_ALGORITHMS_HYSPECSCHARPFCORRECTION_H_ + +#include "MantidAlgorithms/DllConfig.h" +#include "MantidAPI/Algorithm.h" + +namespace Mantid { +namespace Algorithms { + +/** HyspecScharpfCorrection : Divide by cos(2alpha) where alpha is the angle + between incident beam and the polarization direction. It assumes scattering + in the horizontal plane + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_ALGORITHMS_DLL HyspecScharpfCorrection : public API::Algorithm { +public: + const std::string name() const override; + int version() const override; + const std::string category() const override; + const std::string summary() const override; + +private: + void init() override; + void exec() override; + void execEvent(); + /** + * Execute Scharpf correction for event lists + * @param wevector the list of events to correct + * @param thPlane the in-plane angle for the detector corresponding to the + * event list + */ + template <class T> + void ScharpfEventHelper(std::vector<T> &wevector, double thPlane); + /** + * @brief calculate the Scharph angle correction factor + * @param kfki kf/ki + * @param thPlane the in-plane angle of the detector + * @return factor + */ + float calculateFactor(const double kfki, const double thPlane); + /// The user selected (input) workspace + Mantid::API::MatrixWorkspace_const_sptr m_inputWS; + /// The output workspace, maybe the same as the input one + Mantid::API::MatrixWorkspace_sptr m_outputWS; + /// In plane angle beween polarization and incident beam (in degrees) + double m_angle; + /// Lower limit for abs(cos(2*Scharpf angle)), below which intensities are 0 + double m_precision; + /// Incident energy + double m_Ei; +}; + +} // namespace Algorithms +} // namespace Mantid + +#endif /* MANTID_ALGORITHMS_HYSPECSCHARPFCORRECTION_H_ */ diff --git a/Framework/Algorithms/inc/MantidAlgorithms/MaskBins.h b/Framework/Algorithms/inc/MantidAlgorithms/MaskBins.h index 0f3db6365d1d65fdb4eabe6061f82a89a4598c87..708ee013796682e9d1118e0eda412eb80a109189 100644 --- a/Framework/Algorithms/inc/MantidAlgorithms/MaskBins.h +++ b/Framework/Algorithms/inc/MantidAlgorithms/MaskBins.h @@ -1,12 +1,10 @@ #ifndef MANTID_ALGORITHMS_MASKBINS_H_ #define MANTID_ALGORITHMS_MASKBINS_H_ 
-//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- -#include "MantidAPI/Algorithm.h" +#include "MantidAPI/ParallelAlgorithm.h" #include "MantidDataObjects/EventList.h" #include "MantidDataObjects/EventWorkspace.h" +#include "MantidIndexing/SpectrumIndexSet.h" namespace Mantid { @@ -55,10 +53,8 @@ namespace Algorithms { File change history is stored at: <https://github.com/mantidproject/mantid> Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class DLLExport MaskBins : public API::Algorithm { +class DLLExport MaskBins : public API::ParallelAlgorithm { public: - /// Constructor - MaskBins(); /// Algorithm's name const std::string name() const override { return "MaskBins"; } /// Summary of algorithms purpose @@ -82,10 +78,10 @@ private: MantidVec::difference_type &startBin, MantidVec::difference_type &endBin); - double m_startX; ///< The range start point - double m_endX; ///< The range end point - std::vector<int> - spectra_list; ///<the list of Spectra (workspace index) to load + double m_startX{0.0}; ///< The range start point + double m_endX{0.0}; ///< The range end point + Indexing::SpectrumIndexSet + indexSet; ///<the list of Spectra (workspace index) to load }; } // namespace Algorithms diff --git a/Framework/Algorithms/inc/MantidAlgorithms/MonitorEfficiencyCorUser.h b/Framework/Algorithms/inc/MantidAlgorithms/MonitorEfficiencyCorUser.h index 6035af74f872b7fe343e29947d918f249e7c22b7..1b9c80ef93b430614ee09d73a63df5f4ff5c7a49 100644 --- a/Framework/Algorithms/inc/MantidAlgorithms/MonitorEfficiencyCorUser.h +++ b/Framework/Algorithms/inc/MantidAlgorithms/MonitorEfficiencyCorUser.h @@ -47,7 +47,7 @@ private: /// stores the incident energy of the neutrons double m_Ei = 0.0; /// stores the total count of neutrons from the monitor - int m_monitorCounts = 0; + double m_monitorCounts = 0; }; } // namespace Algorithms diff --git 
a/Framework/Algorithms/inc/MantidAlgorithms/Rebin.h b/Framework/Algorithms/inc/MantidAlgorithms/Rebin.h index a5ddf8a9b2a9f7a99c064bcd401e2d84e437287f..e4c2fd7eb576518ae8742a203b5cf4af81061075 100644 --- a/Framework/Algorithms/inc/MantidAlgorithms/Rebin.h +++ b/Framework/Algorithms/inc/MantidAlgorithms/Rebin.h @@ -1,10 +1,8 @@ #ifndef MANTID_ALGORITHMS_REBIN_H_ #define MANTID_ALGORITHMS_REBIN_H_ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- -#include "MantidAPI/Algorithm.h" +#include "MantidAPI/ParallelAlgorithm.h" + namespace Mantid { namespace Algorithms { /** Takes a workspace as input and rebins the data according to the input rebin @@ -52,7 +50,7 @@ namespace Algorithms { File change history is stored at: <https://github.com/mantidproject/mantid> Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class DLLExport Rebin : public API::Algorithm { +class DLLExport Rebin : public API::ParallelAlgorithm { public: /// Algorithm's name for identification overriding a virtual method const std::string name() const override { return "Rebin"; } @@ -90,10 +88,6 @@ protected: void propagateMasks(API::MatrixWorkspace_const_sptr inputWS, API::MatrixWorkspace_sptr outputWS, int hist); - - Parallel::ExecutionMode getParallelExecutionMode( - const std::map<std::string, Parallel::StorageMode> &storageModes) - const override; }; } // namespace Algorithms diff --git a/Framework/Algorithms/inc/MantidAlgorithms/RemovePromptPulse.h b/Framework/Algorithms/inc/MantidAlgorithms/RemovePromptPulse.h index 48d73311a8624613956ef471ee5453b2466e0252..c20a915bb7ea5cb957bec73a2ab950d5193fb58f 100644 --- a/Framework/Algorithms/inc/MantidAlgorithms/RemovePromptPulse.h +++ b/Framework/Algorithms/inc/MantidAlgorithms/RemovePromptPulse.h @@ -2,7 +2,7 @@ #define MANTID_ALGORITHMS_REMOVEPROMPTPULSE_H_ #include "MantidKernel/System.h" -#include 
"MantidAPI/Algorithm.h" +#include "MantidAPI/ParallelAlgorithm.h" #include "MantidAPI/Run.h" namespace Mantid { @@ -34,7 +34,7 @@ namespace Algorithms { File change history is stored at: <https://github.com/mantidproject/mantid> Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class DLLExport RemovePromptPulse : public API::Algorithm { +class DLLExport RemovePromptPulse : public API::ParallelAlgorithm { public: /// Algorithm's name for identification const std::string name() const override; diff --git a/Framework/Algorithms/inc/MantidAlgorithms/SortEvents.h b/Framework/Algorithms/inc/MantidAlgorithms/SortEvents.h index 126c1b8ab62865baf8254eb1c7723f12212e3273..86b09cc5acd0e11d67daf364b793a7e5e18ab601 100644 --- a/Framework/Algorithms/inc/MantidAlgorithms/SortEvents.h +++ b/Framework/Algorithms/inc/MantidAlgorithms/SortEvents.h @@ -1,10 +1,7 @@ #ifndef MANTID_ALGORITHMS_SORTEVENTS_H_ #define MANTID_ALGORITHMS_SORTEVENTS_H_ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- -#include "MantidAPI/Algorithm.h" +#include "MantidAPI/ParallelAlgorithm.h" namespace Mantid { namespace Algorithms { @@ -42,7 +39,7 @@ namespace Algorithms { File change history is stored at: <https://github.com/mantidproject/mantid> Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class DLLExport SortEvents : public API::Algorithm { +class DLLExport SortEvents : public API::ParallelAlgorithm { public: /// Algorithm's name for identification overriding a virtual method const std::string name() const override { return "SortEvents"; } diff --git a/Framework/Algorithms/src/CalculatePolynomialBackground.cpp b/Framework/Algorithms/src/CalculatePolynomialBackground.cpp index 472ef3c87cdd7312a1e81eb92319bda955733f17..5d4691b252d205a6d0ca8dd1cfbd4447e04501b1 100644 --- a/Framework/Algorithms/src/CalculatePolynomialBackground.cpp +++ 
b/Framework/Algorithms/src/CalculatePolynomialBackground.cpp @@ -9,18 +9,26 @@ #include "MantidKernel/ArrayOrderedPairsValidator.h" #include "MantidKernel/ArrayProperty.h" #include "MantidKernel/BoundedValidator.h" +#include "MantidKernel/ListValidator.h" #include <utility> namespace { /// String constants for algorithm's properties. namespace Prop { +const static std::string COST_FUNCTION = "CostFunction"; const static std::string INPUT_WS = "InputWorkspace"; const static std::string OUTPUT_WS = "OutputWorkspace"; const static std::string POLY_DEGREE = "Degree"; const static std::string XRANGES = "XRanges"; } +/// String constants for cost function options. +namespace CostFunc { +const static std::string UNWEIGHTED_LEAST_SQUARES = "Unweighted least squares"; +const static std::string WEIGHTED_LEAST_SQUARES = "Least squares"; +} + /** Filters ranges completely outside the histogram X values. * @param ranges a vector of start-end pairs to filter * @param ws a workspace @@ -155,11 +163,10 @@ std::vector<double> invertRanges(const std::vector<double> &ranges) { * @param ranges a vector defining the fitting intervals * @return a vector of final fitted parameters */ -std::vector<double> executeFit(Mantid::API::Algorithm &fit, - const std::string &function, - Mantid::API::MatrixWorkspace_sptr &ws, - const size_t wsIndex, - const std::vector<double> &ranges) { +std::vector<double> +executeFit(Mantid::API::Algorithm &fit, const std::string &function, + Mantid::API::MatrixWorkspace_sptr &ws, const size_t wsIndex, + const std::vector<double> &ranges, const std::string &costFunction) { const auto fitRanges = histogramRanges(ranges, *ws, wsIndex); const auto excludedRanges = invertRanges(fitRanges); fit.setProperty("Function", function); @@ -168,6 +175,8 @@ std::vector<double> executeFit(Mantid::API::Algorithm &fit, fit.setProperty("StartX", fitRanges.front()); fit.setProperty("EndX", fitRanges.back()); fit.setProperty("Exclude", excludedRanges); + fit.setProperty("Minimizer", 
"Levenberg-MarquardtMD"); + fit.setProperty(Prop::COST_FUNCTION, costFunction); fit.setProperty("CreateOutput", true); fit.executeAsChildAlg(); Mantid::API::ITableWorkspace_sptr fitResult = @@ -268,6 +277,12 @@ void CalculatePolynomialBackground::init() { declareProperty(Kernel::make_unique<Kernel::ArrayProperty<double>>( Prop::XRANGES, std::vector<double>(), orderedPairs), "A list of fitting ranges given as pairs of X values."); + std::array<std::string, 2> costFuncOpts{ + {CostFunc::WEIGHTED_LEAST_SQUARES, CostFunc::UNWEIGHTED_LEAST_SQUARES}}; + declareProperty( + Prop::COST_FUNCTION, CostFunc::WEIGHTED_LEAST_SQUARES.c_str(), + boost::make_shared<Kernel::ListValidator<std::string>>(costFuncOpts), + "The cost function to be passed to the Fit algorithm."); } //---------------------------------------------------------------------------------------------- @@ -279,6 +294,7 @@ void CalculatePolynomialBackground::exec() { API::MatrixWorkspace_sptr outWS{ DataObjects::create<DataObjects::Workspace2D>(*inWS)}; const std::vector<double> inputRanges = getProperty(Prop::XRANGES); + const std::string costFunction = getProperty(Prop::COST_FUNCTION); const auto polyDegree = static_cast<size_t>(static_cast<int>(getProperty(Prop::POLY_DEGREE))); const std::vector<double> initialParams(polyDegree + 1, 0.1); @@ -290,7 +306,8 @@ void CalculatePolynomialBackground::exec() { PARALLEL_START_INTERUPT_REGION const bool logging{false}; auto fit = createChildAlgorithm("Fit", 0, 0, logging); - const auto parameters = executeFit(*fit, fitFunction, inWS, i, inputRanges); + const auto parameters = + executeFit(*fit, fitFunction, inWS, i, inputRanges, costFunction); const auto bkgFunction = makeFunctionString(parameters); evaluateInPlace(bkgFunction, *outWS, i); progress.report(); diff --git a/Framework/Algorithms/src/ChangePulsetime2.cpp b/Framework/Algorithms/src/ChangePulsetime2.cpp index 216ec74ce8d14a62e11845999b81c38292f06863..68b24d2cfb531f5539fc4f5d44462c0b14f93083 100644 --- 
a/Framework/Algorithms/src/ChangePulsetime2.cpp +++ b/Framework/Algorithms/src/ChangePulsetime2.cpp @@ -1,4 +1,5 @@ #include "MantidAlgorithms/ChangePulsetime2.h" +#include "MantidAPI/Algorithm.tcc" #include "MantidAPI/WorkspaceFactory.h" #include "MantidDataObjects/EventWorkspace.h" #include "MantidKernel/ArrayProperty.h" @@ -19,7 +20,8 @@ using std::size_t; /** Initialize the algorithm's properties. */ void ChangePulsetime2::init() { - declareWorkspaceInputProperties<EventWorkspace>("InputWorkspace"); + declareWorkspaceInputProperties<EventWorkspace>("InputWorkspace", + "An input event workspace."); declareProperty( make_unique<PropertyWithValue<double>>("TimeOffset", Direction::Input), "Number of seconds (a float) to add to each event's pulse " diff --git a/Framework/Algorithms/src/CropWorkspace.cpp b/Framework/Algorithms/src/CropWorkspace.cpp index 620e15a10e607bb53e95a3b0c8049dcb7c3e5752..eee69eaab8cbac3372dcf8a8e30e9aa90be21897 100644 --- a/Framework/Algorithms/src/CropWorkspace.cpp +++ b/Framework/Algorithms/src/CropWorkspace.cpp @@ -1,6 +1,3 @@ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- #include "MantidAPI/MatrixWorkspace.h" #include "MantidAlgorithms/CropWorkspace.h" #include "MantidKernel/BoundedValidator.h" diff --git a/Framework/Algorithms/src/ExtractSpectra.cpp b/Framework/Algorithms/src/ExtractSpectra.cpp index 216e46bc3a6aee87acee4e45b7d6fe29d9b7df27..3c2609c073c0f562dde9645f0d63b19dfda79072 100644 --- a/Framework/Algorithms/src/ExtractSpectra.cpp +++ b/Framework/Algorithms/src/ExtractSpectra.cpp @@ -1,14 +1,16 @@ #include "MantidAlgorithms/ExtractSpectra.h" +#include "MantidAlgorithms/ExtractSpectra2.h" #include "MantidDataObjects/WorkspaceCreation.h" +#include "MantidAPI/Algorithm.tcc" #include "MantidAPI/NumericAxis.h" #include "MantidAPI/TextAxis.h" #include "MantidAPI/WorkspaceFactory.h" #include "MantidKernel/ArrayProperty.h" 
#include "MantidKernel/BoundedValidator.h" -#include "MantidKernel/VectorHelper.h" #include "MantidIndexing/Extract.h" #include "MantidIndexing/IndexInfo.h" +#include "MantidHistogramData/Slice.h" #include <algorithm> @@ -23,13 +25,12 @@ namespace Algorithms { using namespace Kernel; using namespace API; using namespace DataObjects; +using namespace HistogramData; using Types::Event::TofEvent; // Register the algorithm into the AlgorithmFactory DECLARE_ALGORITHM(ExtractSpectra) -//---------------------------------------------------------------------------------------------- - /// Algorithms name for identification. @see Algorithm::name const std::string ExtractSpectra::name() const { return "ExtractSpectra"; } @@ -47,7 +48,6 @@ const std::string ExtractSpectra::summary() const { "workspace."; } -//---------------------------------------------------------------------------------------------- /** Initialize the algorithm's properties. */ void ExtractSpectra::init() { @@ -91,127 +91,50 @@ void ExtractSpectra::init() { "the latter is being selected."); } -//---------------------------------------------------------------------------------------------- /** Executes the algorithm * @throw std::out_of_range If a property is set to an invalid value for the * input workspace */ void ExtractSpectra::exec() { - // Get the input workspace m_inputWorkspace = getProperty("InputWorkspace"); m_histogram = m_inputWorkspace->isHistogramData(); - // Check for common boundaries in input workspace m_commonBoundaries = WorkspaceHelpers::commonBoundaries(*m_inputWorkspace); + this->checkProperties(); + + auto extract = boost::make_shared<ExtractSpectra2>(); + setupAsChildAlgorithm(extract); + extract->setWorkspaceInputProperties( + "InputWorkspace", m_inputWorkspace, IndexType::WorkspaceIndex, + std::vector<int64_t>(m_workspaceIndexList.begin(), + m_workspaceIndexList.end())); + extract->execute(); + m_inputWorkspace = extract->getProperty("OutputWorkspace"); + 
setProperty("OutputWorkspace", m_inputWorkspace); + + if (isDefault("XMin") && isDefault("XMax")) + return; eventW = boost::dynamic_pointer_cast<EventWorkspace>(m_inputWorkspace); - if (eventW != nullptr) { - // Input workspace is an event workspace. Use the other exec method + if (eventW) this->execEvent(); - } else { - // Otherwise it's a Workspace2D + else this->execHistogram(); - } } /// Execute the algorithm in case of a histogrammed data. void ExtractSpectra::execHistogram() { - // Retrieve and validate the input properties - this->checkProperties(); - - // Create the output workspace - MatrixWorkspace_sptr outputWorkspace = WorkspaceFactory::Instance().create( - m_inputWorkspace, m_workspaceIndexList.size(), m_maxX - m_minX, - m_maxX - m_minX - m_histogram); - outputWorkspace->setIndexInfo( - Indexing::extract(m_inputWorkspace->indexInfo(), m_workspaceIndexList)); - - // If this is a Workspace2D, get the spectra axes for copying in the spectraNo - // later - Axis *inAxis1(nullptr); - TextAxis *outTxtAxis(nullptr); - NumericAxis *outNumAxis(nullptr); - if (m_inputWorkspace->axes() > 1) { - inAxis1 = m_inputWorkspace->getAxis(1); - auto outAxis1 = outputWorkspace->getAxis(1); - outTxtAxis = dynamic_cast<TextAxis *>(outAxis1); - if (!outTxtAxis) - outNumAxis = dynamic_cast<NumericAxis *>(outAxis1); - } - - cow_ptr<HistogramData::HistogramX> newX(nullptr); - if (m_commonBoundaries) { - auto &oldX = m_inputWorkspace->x(m_workspaceIndexList.front()); - newX = make_cow<HistogramData::HistogramX>(oldX.begin() + m_minX, - oldX.begin() + m_maxX); - } - - bool doCrop = ((m_minX != 0) || (m_maxX != m_inputWorkspace->x(0).size())); - - Progress prog(this, 0.0, 1.0, (m_workspaceIndexList.size())); - // Loop over the required workspace indices, copying in the desired bins - for (int j = 0; j < static_cast<int>(m_workspaceIndexList.size()); ++j) { - auto i = m_workspaceIndexList[j]; - - bool hasDx = m_inputWorkspace->hasDx(i); - - // Preserve/restore sharing if X vectors 
are the same + int size = static_cast<int>(m_inputWorkspace->getNumberHistograms()); + Progress prog(this, 0.0, 1.0, size); + for (int i = 0; i < size; ++i) { if (m_commonBoundaries) { - outputWorkspace->setSharedX(j, newX); - if (hasDx) { - auto &oldDx = m_inputWorkspace->dx(i); - outputWorkspace->setSharedDx( - j, make_cow<HistogramData::HistogramDx>( - oldDx.begin() + m_minX, - oldDx.begin() + (m_maxX - m_histogram))); - } - } else { - // Safe to just copy whole vector 'cos can't be cropping in X if not - // common - outputWorkspace->setSharedX(j, m_inputWorkspace->sharedX(i)); - outputWorkspace->setSharedDx(j, m_inputWorkspace->sharedDx(i)); - } - - if (doCrop) { - auto &oldY = m_inputWorkspace->y(i); - outputWorkspace->mutableY(j) - .assign(oldY.begin() + m_minX, oldY.begin() + (m_maxX - m_histogram)); - auto &oldE = m_inputWorkspace->e(i); - outputWorkspace->mutableE(j) - .assign(oldE.begin() + m_minX, oldE.begin() + (m_maxX - m_histogram)); + m_inputWorkspace->setHistogram(i, slice(m_inputWorkspace->histogram(i), + m_minX, m_maxX - m_histogram)); } else { - outputWorkspace->setSharedY(j, m_inputWorkspace->sharedY(i)); - outputWorkspace->setSharedE(j, m_inputWorkspace->sharedE(i)); - } - - // copy over the axis entry for each spectrum, regardless of the type of - // axes present - if (inAxis1) { - if (outTxtAxis) { - outTxtAxis->setLabel(j, inAxis1->label(i)); - } else if (outNumAxis) { - outNumAxis->setValue(j, inAxis1->operator()(i)); - } - // spectra axis is implicit in workspace creation - } - - if (!m_commonBoundaries) - this->cropRagged(outputWorkspace, static_cast<int>(i), j); - - // Propagate bin masking if there is any - if (m_inputWorkspace->hasMaskedBins(i)) { - const MatrixWorkspace::MaskList &inputMasks = - m_inputWorkspace->maskedBins(i); - MatrixWorkspace::MaskList::const_iterator it; - for (it = inputMasks.begin(); it != inputMasks.end(); ++it) { - const size_t maskIndex = (*it).first; - if (maskIndex >= m_minX && maskIndex < m_maxX - 
m_histogram) - outputWorkspace->flagMasked(j, maskIndex - m_minX, (*it).second); - } + this->cropRagged(*m_inputWorkspace, i); } + propagateBinMasking(*m_inputWorkspace, i); prog.report(); } - - setProperty("OutputWorkspace", outputWorkspace); } namespace { // anonymous namespace @@ -222,7 +145,7 @@ template <class T> struct eventFilter { bool operator()(const T &value) { const double tof = value.tof(); - return (tof <= maxValue && tof >= minValue); + return !(tof <= maxValue && tof >= minValue); } double minValue; @@ -230,11 +153,11 @@ template <class T> struct eventFilter { }; template <class T> -void copyEventsHelper(const std::vector<T> &inputEvents, - std::vector<T> &outputEvents, const double xmin, - const double xmax) { - copy_if(inputEvents.begin(), inputEvents.end(), - std::back_inserter(outputEvents), eventFilter<T>(xmin, xmax)); +void filterEventsHelper(std::vector<T> &events, const double xmin, + const double xmax) { + events.erase( + std::remove_if(events.begin(), events.end(), eventFilter<T>(xmin, xmax)), + events.end()); } } @@ -250,110 +173,68 @@ void ExtractSpectra::execEvent() { if (isEmpty(maxX_val)) maxX_val = eventW->getTofMax(); - // Retrieve and validate the input properties - this->checkProperties(); - HistogramData::BinEdges XValues_new(2); + BinEdges binEdges(2); if (m_commonBoundaries) { - auto &oldX = m_inputWorkspace->x(m_workspaceIndexList.front()); - XValues_new = - HistogramData::BinEdges(oldX.begin() + m_minX, oldX.begin() + m_maxX); + auto &oldX = m_inputWorkspace->x(0); + binEdges = BinEdges(oldX.begin() + m_minX, oldX.begin() + m_maxX); } - if (m_maxX - m_minX < 2) { // create new output X axis - std::vector<double> rb_params{minX_val, maxX_val - minX_val, maxX_val}; - static_cast<void>(VectorHelper::createAxisFromRebinParams( - rb_params, XValues_new.mutableRawData())); + binEdges = {minX_val, maxX_val}; } - // run inplace branch if appropriate - MatrixWorkspace_sptr OutputWorkspace = this->getProperty("OutputWorkspace"); - bool 
inPlace = (OutputWorkspace == m_inputWorkspace); - if (inPlace) - g_log.debug("Cropping EventWorkspace in-place."); - - // Create the output workspace eventW->sortAll(TOF_SORT, nullptr); - auto outputWorkspace = create<EventWorkspace>( - *m_inputWorkspace, - Indexing::extract(m_inputWorkspace->indexInfo(), m_workspaceIndexList), - XValues_new); - outputWorkspace->sortAll(TOF_SORT, nullptr); - - Progress prog(this, 0.0, 1.0, 2 * m_workspaceIndexList.size()); - eventW->sortAll(Mantid::DataObjects::TOF_SORT, &prog); - // Loop over the required workspace indices, copying in the desired bins - PARALLEL_FOR_IF(Kernel::threadSafe(*m_inputWorkspace, *outputWorkspace)) - for (int j = 0; j < static_cast<int>(m_workspaceIndexList.size()); ++j) { + + Progress prog(this, 0.0, 1.0, eventW->getNumberHistograms()); + PARALLEL_FOR_IF(Kernel::threadSafe(*eventW)) + for (int i = 0; i < static_cast<int>(eventW->getNumberHistograms()); ++i) { PARALLEL_START_INTERUPT_REGION - auto i = m_workspaceIndexList[j]; - const EventList &el = eventW->getSpectrum(i); - // The output event list - EventList &outEL = outputWorkspace->getSpectrum(j); + EventList &el = eventW->getSpectrum(i); switch (el.getEventType()) { case TOF: { - std::vector<TofEvent> moreevents; - moreevents.reserve(el.getNumberEvents()); // assume all will make it - copyEventsHelper(el.getEvents(), moreevents, minX_val, maxX_val); - outEL += moreevents; + filterEventsHelper(el.getEvents(), minX_val, maxX_val); break; } case WEIGHTED: { - std::vector<WeightedEvent> moreevents; - moreevents.reserve(el.getNumberEvents()); // assume all will make it - copyEventsHelper(el.getWeightedEvents(), moreevents, minX_val, maxX_val); - outEL += moreevents; + filterEventsHelper(el.getWeightedEvents(), minX_val, maxX_val); break; } case WEIGHTED_NOTIME: { - std::vector<WeightedEventNoTime> moreevents; - moreevents.reserve(el.getNumberEvents()); // assume all will make it - copyEventsHelper(el.getWeightedEventsNoTime(), moreevents, minX_val, - 
maxX_val); - outEL += moreevents; + filterEventsHelper(el.getWeightedEventsNoTime(), minX_val, maxX_val); break; } } - outEL.setSortOrder(el.getSortType()); - - bool hasDx = eventW->hasDx(i); - if (!m_commonBoundaries) { - // If the X axis is NOT common, then keep the initial X axis, just clear - // the events - outEL.setX(el.ptrX()); - outEL.setSharedDx(el.sharedDx()); - } else { - // X is already set in workspace creation, just set Dx if necessary. - if (hasDx) { - auto &oldDx = m_inputWorkspace->dx(i); - outEL.setPointStandardDeviations( - oldDx.begin() + m_minX, oldDx.begin() + (m_maxX - m_histogram)); - } - } - - // Propagate bin masking if there is any - if (m_inputWorkspace->hasMaskedBins(i)) { - const MatrixWorkspace::MaskList &inputMasks = - m_inputWorkspace->maskedBins(i); - MatrixWorkspace::MaskList::const_iterator it; - for (it = inputMasks.begin(); it != inputMasks.end(); ++it) { - const size_t maskIndex = (*it).first; - if (maskIndex >= m_minX && maskIndex < m_maxX - m_histogram) - outputWorkspace->flagMasked(j, maskIndex - m_minX, (*it).second); + // If the X axis is NOT common, then keep the initial X axis, just clear the + // events, otherwise: + if (m_commonBoundaries) { + const auto oldDx = el.pointStandardDeviations(); + el.setHistogram(binEdges); + if (oldDx) { + el.setPointStandardDeviations(oldDx.begin() + m_minX, + oldDx.begin() + (m_maxX - m_histogram)); } } - // When cropping in place, you can clear out old memory from the input one! - if (inPlace) { - eventW->getSpectrum(i).clear(); - } + propagateBinMasking(*eventW, i); prog.report(); PARALLEL_END_INTERUPT_REGION } PARALLEL_CHECK_INTERUPT_REGION +} - setProperty("OutputWorkspace", std::move(outputWorkspace)); +/// Propagate bin masking if there is any. 
+void ExtractSpectra::propagateBinMasking(MatrixWorkspace &workspace, + const int i) const { + if (workspace.hasMaskedBins(i)) { + MatrixWorkspace::MaskList filteredMask; + for (const auto &mask : workspace.maskedBins(i)) { + const size_t maskIndex = mask.first; + if (maskIndex >= m_minX && maskIndex < m_maxX - m_histogram) + filteredMask[maskIndex - m_minX] = mask.second; + } + workspace.setMaskedBins(i, filteredMask); + } } /** Retrieves the optional input properties and checks that they have valid @@ -365,6 +246,29 @@ void ExtractSpectra::execEvent() { * input workspace */ void ExtractSpectra::checkProperties() { + m_minX = this->getXMin(); + m_maxX = this->getXMax(); + const size_t xSize = m_inputWorkspace->x(0).size(); + if (m_minX > 0 || m_maxX < xSize) { + if (m_minX > m_maxX) { + g_log.error("XMin must be less than XMax"); + throw std::out_of_range("XMin must be less than XMax"); + } + if ((m_minX == m_maxX || + (m_inputWorkspace->isHistogramData() && m_maxX == m_minX + 1)) && + m_commonBoundaries && + !boost::dynamic_pointer_cast<EventWorkspace>(m_inputWorkspace)) { + g_log.error("The X range given lies entirely within a single bin"); + throw std::out_of_range( + "The X range given lies entirely within a single bin"); + } + m_croppingInX = true; + } + if (!m_commonBoundaries) + m_minX = 0; + if (!m_commonBoundaries) + m_maxX = static_cast<int>(m_inputWorkspace->x(0).size()); + // The hierarchy of inputs is (one is being selected): // 1. DetectorList // 2. 
WorkspaceIndexList @@ -379,21 +283,11 @@ void ExtractSpectra::checkProperties() { if (m_workspaceIndexList.empty()) { int minSpec_i = getProperty("StartWorkspaceIndex"); size_t minSpec = static_cast<size_t>(minSpec_i); - const size_t numberOfSpectra = m_inputWorkspace->getNumberHistograms(); + const size_t numberOfSpectra = m_inputWorkspace->indexInfo().globalSize(); int maxSpec_i = getProperty("EndWorkspaceIndex"); size_t maxSpec = static_cast<size_t>(maxSpec_i); if (isEmpty(maxSpec_i)) maxSpec = numberOfSpectra - 1; - - // Check 'StartSpectrum' is in range 0-numberOfSpectra - if (minSpec > numberOfSpectra - 1) { - g_log.error("StartWorkspaceIndex out of range!"); - throw std::out_of_range("StartSpectrum out of range!"); - } - if (maxSpec > numberOfSpectra - 1) { - g_log.error("EndWorkspaceIndex out of range!"); - throw std::out_of_range("EndWorkspaceIndex out of range!"); - } if (maxSpec < minSpec) { g_log.error("StartWorkspaceIndex must be less than or equal to " "EndWorkspaceIndex"); @@ -402,38 +296,10 @@ void ExtractSpectra::checkProperties() { "to EndWorkspaceIndex"); } m_workspaceIndexList.reserve(maxSpec - minSpec + 1); - for (size_t i = static_cast<size_t>(minSpec); - i <= static_cast<size_t>(maxSpec); ++i) { + for (size_t i = minSpec; i <= maxSpec; ++i) m_workspaceIndexList.push_back(i); - } } } - - // get the x-range from the used spectra - size_t spectrumIndex = 0; // default is just look at the initial spectrum - if (!m_workspaceIndexList.empty()) - spectrumIndex = - m_workspaceIndexList.front(); // or the first one being extracted - - m_minX = this->getXMin(spectrumIndex); - m_maxX = this->getXMax(spectrumIndex); - const size_t xSize = m_inputWorkspace->x(spectrumIndex).size(); - if (m_minX > 0 || m_maxX < xSize) { - if (m_minX > m_maxX) { - g_log.error("XMin must be less than XMax"); - throw std::out_of_range("XMin must be less than XMax"); - } - if (m_minX == m_maxX && m_commonBoundaries && eventW == nullptr) { - g_log.error("The X range given lies 
entirely within a single bin"); - throw std::out_of_range( - "The X range given lies entirely within a single bin"); - } - m_croppingInX = true; - } - if (!m_commonBoundaries) - m_minX = 0; - if (!m_commonBoundaries) - m_maxX = static_cast<int>(m_inputWorkspace->x(spectrumIndex).size()); } /** Find the X index corresponding to (or just within) the value given in the @@ -491,26 +357,21 @@ size_t ExtractSpectra::getXMax(const size_t wsIndex) { } /** Zeroes all data points outside the X values given - * @param outputWorkspace :: The output workspace - data has already been - * copied - * @param inIndex :: The workspace index of the spectrum in the input - * workspace - * @param outIndex :: The workspace index of the spectrum in the output - * workspace + * @param workspace :: The output workspace to crop + * @param index :: The workspace index of the spectrum */ -void ExtractSpectra::cropRagged(API::MatrixWorkspace_sptr outputWorkspace, - int inIndex, int outIndex) { - auto &Y = outputWorkspace->mutableY(outIndex); - auto &E = outputWorkspace->mutableE(outIndex); +void ExtractSpectra::cropRagged(MatrixWorkspace &workspace, int index) { + auto &Y = workspace.mutableY(index); + auto &E = workspace.mutableE(index); const size_t size = Y.size(); - size_t startX = this->getXMin(inIndex); + size_t startX = this->getXMin(index); if (startX > size) startX = size; for (size_t i = 0; i < startX; ++i) { Y[i] = 0.0; E[i] = 0.0; } - size_t endX = this->getXMax(inIndex); + size_t endX = this->getXMax(index); if (endX > 0) endX -= m_histogram; for (size_t i = endX; i < size; ++i) { diff --git a/Framework/Algorithms/src/ExtractSpectra2.cpp b/Framework/Algorithms/src/ExtractSpectra2.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f4df15fb16db83e6a36f66386778ed11b6feb629 --- /dev/null +++ b/Framework/Algorithms/src/ExtractSpectra2.cpp @@ -0,0 +1,96 @@ +#include "MantidAlgorithms/ExtractSpectra2.h" +#include "MantidDataObjects/EventWorkspace.h" +#include 
"MantidDataObjects/Workspace2D.h" +#include "MantidDataObjects/WorkspaceCreation.h" +#include "MantidAPI/Algorithm.tcc" +#include "MantidAPI/NumericAxis.h" +#include "MantidAPI/TextAxis.h" +#include "MantidIndexing/IndexInfo.h" +#include "MantidKernel/make_unique.h" + +namespace Mantid { +using namespace API; +using namespace DataObjects; +using namespace Kernel; +namespace Algorithms { + +// Currently we DO NOT REGISTER the algorithm into the AlgorithmFactory. The API +// is different from version 1 and thus cannot replace it without breaking +// scripts. It can be used internally directly without being registered. + +/// Algorithms name for identification. @see Algorithm::name +const std::string ExtractSpectra2::name() const { return "ExtractSpectra2"; } + +/// Algorithm's version for identification. @see Algorithm::version +int ExtractSpectra2::version() const { return 2; } + +/// Algorithm's category for identification. @see Algorithm::category +const std::string ExtractSpectra2::category() const { + return "Transforms\\Splitting"; +} + +/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary +const std::string ExtractSpectra2::summary() const { + return "Extracts a list of spectra from a workspace and places them in a new " + "workspace."; +} + +/// Initialize the algorithm's properties. 
+void ExtractSpectra2::init() { + declareWorkspaceInputProperties< + MatrixWorkspace, IndexType::SpectrumNum | IndexType::WorkspaceIndex>( + "InputWorkspace", "The input workspace"); + declareProperty(Kernel::make_unique<WorkspaceProperty<>>( + "OutputWorkspace", "", Direction::Output), + "Name of the output workspace"); +} + +/// Executes the algorithm +void ExtractSpectra2::exec() { + boost::shared_ptr<MatrixWorkspace> inputWS; + Indexing::SpectrumIndexSet indexSet; + std::tie(inputWS, indexSet) = + getWorkspaceAndIndices<MatrixWorkspace>("InputWorkspace"); + + auto outputWS = create<MatrixWorkspace>( + *inputWS, dynamic_cast<IndexProperty *>( + getPointerToProperty("InputWorkspaceIndexSet")) + ->getFilteredIndexInfo(), + HistogramData::BinEdges(2)); + + Axis *inAxis1(nullptr); + TextAxis *outTxtAxis(nullptr); + NumericAxis *outNumAxis(nullptr); + if (inputWS->axes() > 1) { + inAxis1 = inputWS->getAxis(1); + auto outAxis1 = outputWS->getAxis(1); + outTxtAxis = dynamic_cast<TextAxis *>(outAxis1); + if (!outTxtAxis) + outNumAxis = dynamic_cast<NumericAxis *>(outAxis1); + } + + Progress prog(this, 0.0, 1.0, indexSet.size()); + for (size_t j = 0; j < indexSet.size(); ++j) { + // Rely on Indexing::IndexSet preserving index order. + const size_t i = indexSet[j]; + // Copy spectrum data, automatically setting up sharing for histogram. + outputWS->getSpectrum(j).copyDataFrom(inputWS->getSpectrum(i)); + + // Copy axis entry, SpectraAxis is implicit in workspace creation + if (outTxtAxis) + outTxtAxis->setLabel(j, inAxis1->label(i)); + else if (outNumAxis) + outNumAxis->setValue(j, inAxis1->operator()(i)); + + // Copy bin masking if it exists. 
+ if (inputWS->hasMaskedBins(i)) + outputWS->setMaskedBins(j, inputWS->maskedBins(i)); + + prog.report(); + } + + setProperty("OutputWorkspace", std::move(outputWS)); +} + +} // namespace Algorithms +} // namespace Mantid diff --git a/Framework/Algorithms/src/HyspecScharpfCorrection.cpp b/Framework/Algorithms/src/HyspecScharpfCorrection.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ac4200f5ed18d990bf4f65222363dad336ba6ff9 --- /dev/null +++ b/Framework/Algorithms/src/HyspecScharpfCorrection.cpp @@ -0,0 +1,248 @@ +#include "MantidAlgorithms/HyspecScharpfCorrection.h" +#include "MantidKernel/BoundedValidator.h" +#include "MantidKernel/CompositeValidator.h" +#include "MantidAPI/InstrumentValidator.h" +#include "MantidAPI/WorkspaceUnitValidator.h" +#include "MantidAPI/Run.h" +#include "MantidAPI/WorkspaceFactory.h" +#include "MantidGeometry/Instrument/ReferenceFrame.h" +#include "MantidGeometry/Instrument.h" +#include "MantidDataObjects/EventWorkspace.h" +#include "MantidAPI/SpectrumInfo.h" + +namespace Mantid { +namespace Algorithms { + +using Mantid::Kernel::Direction; +using Mantid::API::WorkspaceProperty; + +// Register the algorithm into the AlgorithmFactory +DECLARE_ALGORITHM(HyspecScharpfCorrection) + +//---------------------------------------------------------------------------------------------- + +/// Algorithms name for identification. @see Algorithm::name +const std::string HyspecScharpfCorrection::name() const { + return "HyspecScharpfCorrection"; +} + +/// Algorithm's version for identification. @see Algorithm::version +int HyspecScharpfCorrection::version() const { return 1; } + +/// Algorithm's category for identification. @see Algorithm::category +const std::string HyspecScharpfCorrection::category() const { + return "CorrectionFunctions\\SpecialCorrections; Inelastic\\Corrections"; +} + +/// Algorithm's summary for use in the GUI and help. 
@see Algorithm::summary +const std::string HyspecScharpfCorrection::summary() const { + return "Apply polarization factor as part of getting the spin incoherent " + "scattering"; +} + +//---------------------------------------------------------------------------------------------- +/** Initialize the algorithm's properties. + */ +void HyspecScharpfCorrection::init() { + auto wsValidator = boost::make_shared<Mantid::Kernel::CompositeValidator>(); + wsValidator->add<Mantid::API::WorkspaceUnitValidator>("DeltaE"); + wsValidator->add<Mantid::API::InstrumentValidator>(); + declareProperty(Kernel::make_unique<WorkspaceProperty<API::MatrixWorkspace>>( + "InputWorkspace", "", Direction::Input, wsValidator), + "An input workspace in units of energy transfer."); + + auto angleValidator = + boost::make_shared<Mantid::Kernel::BoundedValidator<double>>(); + angleValidator->setLower(-180.0); + angleValidator->setUpper(180.0); + declareProperty("PolarizationAngle", EMPTY_DBL(), angleValidator, + "In plane angle between polatrization and incident beam" + "Must be between -180 and +180 degrees"); + auto precisionValidator = + boost::make_shared<Mantid::Kernel::BoundedValidator<double>>(); + precisionValidator->setLower(0.0); + precisionValidator->setUpper(1.0); + declareProperty( + "Precision", 0.1, precisionValidator, + "If cosine of twice the " + "Scharpf angle is closer to 0 than the precision, the intensities " + "and errors will be set to 0"); + declareProperty(Kernel::make_unique<WorkspaceProperty<API::MatrixWorkspace>>( + "OutputWorkspace", "", Direction::Output), + "An output workspace."); +} + +//---------------------------------------------------------------------------------------------- +/** Execute the algorithm. 
+ */ +void HyspecScharpfCorrection::exec() { + // Get the workspaces + m_inputWS = this->getProperty("InputWorkspace"); + m_outputWS = this->getProperty("OutputWorkspace"); + m_angle = getProperty("PolarizationAngle"); + m_angle *= M_PI / 180.; + m_precision = getProperty("Precision"); + if (m_inputWS->run().hasProperty("Ei")) { + m_Ei = m_inputWS->run().getPropertyValueAsType<double>("Ei"); + } else { + throw std::invalid_argument( + "No Ei value has been set or stored within the run information."); + } + + // Check if it is an event workspace + if (dynamic_cast<const Mantid::DataObjects::EventWorkspace *>( + m_inputWS.get()) != nullptr) { + this->execEvent(); + return; + } + + // If input and output workspaces are not the same, create a new workspace for + // the output + if (m_outputWS != m_inputWS) { + m_outputWS = API::WorkspaceFactory::Instance().create(m_inputWS); + } + + const auto &spectrumInfo = m_inputWS->spectrumInfo(); + + // Get number of spectra in this workspace + const int64_t numberOfSpectra = + static_cast<int64_t>(m_inputWS->getNumberHistograms()); + Mantid::Kernel::V3D samplePos = spectrumInfo.samplePosition(); + const auto refFrame = m_inputWS->getInstrument()->getReferenceFrame(); + API::Progress prog(this, 0.0, 1.0, numberOfSpectra); + PARALLEL_FOR_IF(Kernel::threadSafe(*m_inputWS, *m_outputWS)) + for (int64_t i = 0; i < numberOfSpectra; ++i) { + PARALLEL_START_INTERUPT_REGION + auto &yOut = m_outputWS->mutableY(i); + auto &eOut = m_outputWS->mutableE(i); + + const auto &xIn = m_inputWS->points(i); // get the centers + auto &yIn = m_inputWS->y(i); + auto &eIn = m_inputWS->e(i); + // Copy the energy transfer axis + m_outputWS->setSharedX(i, m_inputWS->sharedX(i)); + + prog.report(); + // continue if no detectors, if monitor, or is masked + if ((!spectrumInfo.hasDetectors(i)) || spectrumInfo.isMonitor(i) || + spectrumInfo.isMasked(i)) { + continue; + } + // get detector info and calculate the in plane angle + Mantid::Kernel::V3D detPos = 
spectrumInfo.position(i); + const auto l2 = detPos - samplePos; + const double thPlane = std::atan2(l2[refFrame->pointingHorizontal()], + l2[refFrame->pointingAlongBeam()]); + size_t spectrumSize = xIn.size(); + for (size_t j = 0; j < spectrumSize; ++j) { + double factor = 0.; + if (xIn[j] < m_Ei) { + double kfki = std::sqrt(1. - xIn[j] / m_Ei); // k_f/k_i + factor = static_cast<double>(this->calculateFactor(kfki, thPlane)); + } + yOut[j] = yIn[j] * factor; + eOut[j] = eIn[j] * factor; + } + PARALLEL_END_INTERUPT_REGION + } // end for i + PARALLEL_CHECK_INTERUPT_REGION + this->setProperty("OutputWorkspace", m_outputWS); +} + +/** Execute for events + */ +void HyspecScharpfCorrection::execEvent() { + g_log.information("Processing event workspace"); + + // If input and output workspaces are not the same, create a new workspace for + // the output + if (m_outputWS != m_inputWS) { + m_outputWS = m_inputWS->clone(); + setProperty("OutputWorkspace", m_outputWS); + } + + Mantid::DataObjects::EventWorkspace_sptr eventWS = + boost::dynamic_pointer_cast<Mantid::DataObjects::EventWorkspace>( + m_outputWS); + + const auto &spectrumInfo = m_inputWS->spectrumInfo(); + + // Get number of spectra in this workspace + const int64_t numberOfSpectra = + static_cast<int64_t>(m_inputWS->getNumberHistograms()); + Mantid::Kernel::V3D samplePos = spectrumInfo.samplePosition(); + const auto refFrame = m_inputWS->getInstrument()->getReferenceFrame(); + API::Progress prog(this, 0.0, 1.0, numberOfSpectra); + PARALLEL_FOR_IF(Kernel::threadSafe(*m_inputWS, *m_outputWS)) + for (int64_t i = 0; i < numberOfSpectra; ++i) { + PARALLEL_START_INTERUPT_REGION + prog.report(); + // continue if no detectors, if monitor, or is masked + if ((!spectrumInfo.hasDetectors(i)) || spectrumInfo.isMonitor(i) || + spectrumInfo.isMasked(i)) { + continue; + } + Mantid::Kernel::V3D detPos = spectrumInfo.position(i); + const auto l2 = detPos - samplePos; + const double thPlane = 
std::atan2(l2[refFrame->pointingHorizontal()], + l2[refFrame->pointingAlongBeam()]); + // Do the correction + auto &evlist = eventWS->getSpectrum(i); + switch (evlist.getEventType()) { + case Mantid::API::TOF: + // Switch to weights if needed. + evlist.switchTo(Mantid::API::WEIGHTED); + /* no break */ + // Fall through + + case Mantid::API::WEIGHTED: + ScharpfEventHelper(evlist.getWeightedEvents(), thPlane); + break; + + case Mantid::API::WEIGHTED_NOTIME: + ScharpfEventHelper(evlist.getWeightedEventsNoTime(), thPlane); + break; + } + PARALLEL_END_INTERUPT_REGION + } // end for i + PARALLEL_CHECK_INTERUPT_REGION +} + +template <class T> +void HyspecScharpfCorrection::ScharpfEventHelper(std::vector<T> &wevector, + double thPlane) { + for (auto it = wevector.begin(); it < wevector.end();) { + double Ef = m_Ei - it->tof(); + if (Ef <= 0) { + it = wevector.erase(it); + } else { + double kfki = std::sqrt(Ef / m_Ei); + + float factor = this->calculateFactor(kfki, thPlane); + + it->m_weight *= factor; + it->m_errorSquared *= factor * factor; + ++it; + } + } +} + +float HyspecScharpfCorrection::calculateFactor(const double kfki, + const double thPlane) { + // angle between in plane Q and z axis + const double angleQ = + std::atan2(-kfki * std::sin(thPlane), 1. - kfki * std::cos(thPlane)); + // Scarpf agle = angle - angleQ + float factor = static_cast<float>(std::cos(2. 
* (m_angle - angleQ))); + // set intensity to 0 if the Scarpf angle is close to 45 degrees + if (std::abs(factor) > m_precision) { + factor = 1.f / factor; + } else { + factor = 0.; + } + + return (factor); +} + +} // namespace Algorithms +} // namespace Mantid diff --git a/Framework/Algorithms/src/MaskBins.cpp b/Framework/Algorithms/src/MaskBins.cpp index 41ebbe2a3716c1931584222ceca918629711768c..e964c3687154f3b11db6c3fa6b2d7c1372e352bc 100644 --- a/Framework/Algorithms/src/MaskBins.cpp +++ b/Framework/Algorithms/src/MaskBins.cpp @@ -1,11 +1,9 @@ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- #include "MantidAlgorithms/MaskBins.h" #include "MantidAPI/HistogramValidator.h" #include "MantidAPI/WorkspaceFactory.h" #include "MantidKernel/ArrayProperty.h" #include "MantidKernel/BoundedValidator.h" +#include "MantidAPI/Algorithm.tcc" #include <limits> #include <sstream> @@ -20,19 +18,15 @@ DECLARE_ALGORITHM(MaskBins) using namespace Kernel; using namespace API; -using namespace Mantid; -using Mantid::DataObjects::EventWorkspace; -using Mantid::DataObjects::EventWorkspace_sptr; -using Mantid::DataObjects::EventWorkspace_const_sptr; - -MaskBins::MaskBins() : API::Algorithm(), m_startX(0.0), m_endX(0.0) {} +using DataObjects::EventWorkspace; +using DataObjects::EventWorkspace_sptr; +using DataObjects::EventWorkspace_const_sptr; void MaskBins::init() { - declareProperty( - make_unique<WorkspaceProperty<>>( - "InputWorkspace", "", Direction::Input, - boost::make_shared<HistogramValidator>()), - "The name of the input workspace. Must contain histogram data."); + declareWorkspaceInputProperties<MatrixWorkspace>( + "InputWorkspace", + "The name of the input workspace. 
Must contain histogram data.", + boost::make_shared<HistogramValidator>()); declareProperty(make_unique<WorkspaceProperty<>>("OutputWorkspace", "", Direction::Output), "The name of the Workspace containing the masked bins."); @@ -47,22 +41,14 @@ void MaskBins::init() { declareProperty("XMax", std::numeric_limits<double>::max(), required, "The value to end masking at."); - // which pixels to load - this->declareProperty(make_unique<ArrayProperty<int>>("SpectraList"), - "Optional: A list of individual which spectra to mask " - "(specified using the workspace index). If not set, " - "all spectra are masked. Can be entered as a " - "comma-seperated list of values, or a range (such as " - "'a-b' which will include spectra with workspace index " - "of a to b inclusively)."); + this->declareProperty(make_unique<ArrayProperty<int64_t>>("SpectraList"), + "Deprecated, use InputWorkspaceIndexSet."); } /** Execution code. * @throw std::invalid_argument If XMax is less than XMin */ void MaskBins::exec() { - MatrixWorkspace_const_sptr inputWS = getProperty("InputWorkspace"); - // Check for valid X limits m_startX = getProperty("XMin"); m_endX = getProperty("XMax"); @@ -75,23 +61,21 @@ void MaskBins::exec() { throw std::invalid_argument(msg.str()); } - //--------------------------------------------------------------------------------- - // what spectra (workspace indices) to load. Optional. 
- this->spectra_list = this->getProperty("SpectraList"); - if (!this->spectra_list.empty()) { - const int numHist = static_cast<int>(inputWS->getNumberHistograms()); - //--- Validate spectra list --- - for (auto wi : this->spectra_list) { - if ((wi < 0) || (wi >= numHist)) { - std::ostringstream oss; - oss << "One of the workspace indices specified, " << wi - << " is above the number of spectra in the workspace (" << numHist - << ")."; - throw std::invalid_argument(oss.str()); - } - } + // Copy indices from legacy property + std::vector<int64_t> spectraList = this->getProperty("SpectraList"); + if (!spectraList.empty()) { + if (!isDefault("InputWorkspaceIndexSet")) + throw std::runtime_error("Cannot provide both InputWorkspaceIndexSet and " + "SpectraList at the same time."); + setProperty("InputWorkspaceIndexSet", spectraList); + g_log.warning("The 'SpectraList' property is deprecated. Use " + "'InputWorkspaceIndexSet' instead."); } + MatrixWorkspace_sptr inputWS; + std::tie(inputWS, indexSet) = + getWorkspaceAndIndices<MatrixWorkspace>("InputWorkspace"); + // Only create the output workspace if it's different to the input one MatrixWorkspace_sptr outputWS = getProperty("OutputWorkspace"); if (outputWS != inputWS) { @@ -99,16 +83,9 @@ void MaskBins::exec() { setProperty("OutputWorkspace", outputWS); } - //--------------------------------------------------------------------------------- - // Now, determine if the input workspace is actually an EventWorkspace - EventWorkspace_const_sptr eventW = - boost::dynamic_pointer_cast<const EventWorkspace>(inputWS); - - if (eventW != nullptr) { - //------- EventWorkspace --------------------------- + if (boost::dynamic_pointer_cast<const EventWorkspace>(inputWS)) { this->execEvent(); } else { - //------- MatrixWorkspace of another kind ------------- MantidVec::difference_type startBin(0), endBin(0); // If the binning is the same throughout, we only need to find the index @@ -119,27 +96,11 @@ void MaskBins::exec() { 
this->findIndices(X, startBin, endBin); } - const int numHists = static_cast<int>(inputWS->getNumberHistograms()); - Progress progress(this, 0.0, 1.0, numHists); + Progress progress(this, 0.0, 1.0, indexSet.size()); // Parallel running has problems with a race condition, leading to // occaisional test failures and crashes - bool useSpectraList = (!this->spectra_list.empty()); - - // Alter the for loop ending based on what we are looping on - int for_end = numHists; - if (useSpectraList) - for_end = static_cast<int>(this->spectra_list.size()); - - for (int i = 0; i < for_end; ++i) { - // Find the workspace index, either based on the spectra list or all - // spectra - int wi; - if (useSpectraList) - wi = this->spectra_list[i]; - else - wi = i; - + for (const auto wi : indexSet) { MantidVec::difference_type startBinLoop(startBin), endBinLoop(endBin); if (!commonBins) this->findIndices(outputWS->binEdges(wi), startBinLoop, endBinLoop); @@ -150,9 +111,8 @@ void MaskBins::exec() { outputWS->maskBin(wi, j); } progress.report(); - - } // ENDFOR(i) - } // ENDIFELSE(eventworkspace?) 
+ } + } } /** Execution code for EventWorkspaces @@ -161,38 +121,20 @@ void MaskBins::execEvent() { MatrixWorkspace_sptr outputMatrixWS = getProperty("OutputWorkspace"); auto outputWS = boost::dynamic_pointer_cast<EventWorkspace>(outputMatrixWS); - // set up the progress bar - const size_t numHists = outputWS->getNumberHistograms(); - Progress progress(this, 0.0, 1.0, numHists * 2); + Progress progress(this, 0.0, 1.0, outputWS->getNumberHistograms() * 2); - // sort the events outputWS->sortAll(Mantid::DataObjects::TOF_SORT, &progress); - // Go through all histograms - if (!this->spectra_list.empty()) { - // Specific spectra were specified - PARALLEL_FOR_IF(Kernel::threadSafe(*outputWS)) - for (int i = 0; i < static_cast<int>(this->spectra_list.size()); // NOLINT - ++i) { - PARALLEL_START_INTERUPT_REGION - outputWS->getSpectrum(this->spectra_list[i]).maskTof(m_startX, m_endX); - progress.report(); - PARALLEL_END_INTERUPT_REGION - } - PARALLEL_CHECK_INTERUPT_REGION - } else { - // Do all spectra! 
- PARALLEL_FOR_IF(Kernel::threadSafe(*outputWS)) - for (int64_t i = 0; i < int64_t(numHists); ++i) { - PARALLEL_START_INTERUPT_REGION - outputWS->getSpectrum(i).maskTof(m_startX, m_endX); - progress.report(); - PARALLEL_END_INTERUPT_REGION - } - PARALLEL_CHECK_INTERUPT_REGION + PARALLEL_FOR_IF(Kernel::threadSafe(*outputWS)) + for (int i = 0; i < static_cast<int>(indexSet.size()); // NOLINT + ++i) { + PARALLEL_START_INTERUPT_REGION + outputWS->getSpectrum(indexSet[i]).maskTof(m_startX, m_endX); + progress.report(); + PARALLEL_END_INTERUPT_REGION } + PARALLEL_CHECK_INTERUPT_REGION - // Clear the MRU outputWS->clearMRU(); } diff --git a/Framework/Algorithms/src/MonitorEfficiencyCorUser.cpp b/Framework/Algorithms/src/MonitorEfficiencyCorUser.cpp index 633ca8a38cdf5d5b32f05981e36ba70bb2715ec8..744a0f5aed4c277ecc40eef434ff96b0855e13b5 100644 --- a/Framework/Algorithms/src/MonitorEfficiencyCorUser.cpp +++ b/Framework/Algorithms/src/MonitorEfficiencyCorUser.cpp @@ -1,3 +1,4 @@ + #include "MantidAlgorithms/MonitorEfficiencyCorUser.h" #include "MantidAPI/InstrumentValidator.h" #include "MantidAPI/MatrixWorkspace.h" @@ -37,22 +38,26 @@ void MonitorEfficiencyCorUser::exec() { m_outputWS = this->getProperty("OutputWorkspace"); - if (m_inputWS->getInstrument()->getName() != "TOFTOF") { - std::string message("The input workspace does not come from TOFTOF"); - g_log.error(message); - throw std::invalid_argument(message); - } - // If input and output workspaces are not the same, create a new workspace for // the output if (m_outputWS != this->m_inputWS) { m_outputWS = API::WorkspaceFactory::Instance().create(m_inputWS); } - double val; - Strings::convert(m_inputWS->run().getProperty("Ei")->value(), val); - m_Ei = val; - Strings::convert(m_inputWS->run().getProperty("monitor_counts")->value(), - m_monitorCounts); + m_Ei = m_inputWS->run().getPropertyValueAsType<double>("Ei"); + + std::string mon_counts_log; + + // get name of the monitor counts sample log from the instrument 
parameter + // file + try { + mon_counts_log = getValFromInstrumentDef("monitor_counts_log"); + } catch (Kernel::Exception::InstrumentDefinitionError) { + // the default value is monitor_counts + mon_counts_log = "monitor_counts"; + } + + m_monitorCounts = + m_inputWS->run().getPropertyValueAsType<double>(mon_counts_log); // get Efficiency formula from the IDF - Parameters file const std::string effFormula = getValFromInstrumentDef("formula_mon_eff"); diff --git a/Framework/Algorithms/src/Rebin.cpp b/Framework/Algorithms/src/Rebin.cpp index b92e3688722ba20b0d69efe29cfff47e0075aa12..62a87396a4b8aa1d3a73c66eb32bad11af3705d2 100644 --- a/Framework/Algorithms/src/Rebin.cpp +++ b/Framework/Algorithms/src/Rebin.cpp @@ -355,10 +355,5 @@ void Rebin::propagateMasks(API::MatrixWorkspace_const_sptr inputWS, } } -Parallel::ExecutionMode Rebin::getParallelExecutionMode( - const std::map<std::string, Parallel::StorageMode> &storageModes) const { - return Parallel::getCorrespondingExecutionMode(storageModes.begin()->second); -} - } // namespace Algorithm } // namespace Mantid diff --git a/Framework/Algorithms/test/CalculatePolynomialBackgroundTest.h b/Framework/Algorithms/test/CalculatePolynomialBackgroundTest.h index 8106f6d6b51eeacf7d9e516f735049abef51639a..19122f111200a432ca1e11103474c94512bf92a5 100644 --- a/Framework/Algorithms/test/CalculatePolynomialBackgroundTest.h +++ b/Framework/Algorithms/test/CalculatePolynomialBackgroundTest.h @@ -98,7 +98,73 @@ public: const auto &bkgEs = outWS->e(histI); const auto &bkgXs = outWS->x(histI); for (size_t binI = 0; binI < nBin; ++binI) { - TS_ASSERT_DELTA(bkgYs[binI], ys[binI], 1e-12) + TS_ASSERT_DELTA(bkgYs[binI], ys[binI], 1e-10) + TS_ASSERT_EQUALS(bkgEs[binI], 0) + TS_ASSERT_EQUALS(bkgXs[binI], xs[binI]) + } + } + } + + void test_costFuctionLeastSquares() { + using namespace WorkspaceCreationHelper; + const size_t nHist{2}; + const HistogramData::Counts counts{0, 4, 0, 0}; + const HistogramData::CountStandardDeviations stdDevs{0, 
0.001, 0, 0}; + const HistogramData::BinEdges edges{0, 1, 2, 3, 4}; + API::MatrixWorkspace_sptr ws( + DataObjects::create<DataObjects::Workspace2D>( + nHist, HistogramData::Histogram(edges, counts, stdDevs)).release()); + auto alg = makeAlgorithm(); + TS_ASSERT_THROWS_NOTHING(alg->setProperty("InputWorkspace", ws)) + TS_ASSERT_THROWS_NOTHING(alg->setProperty("OutputWorkspace", "outputWS")) + TS_ASSERT_THROWS_NOTHING(alg->setProperty("Degree", 0)) + TS_ASSERT_THROWS_NOTHING(alg->setProperty("CostFunction", "Least squares")) + TS_ASSERT_THROWS_NOTHING(alg->execute()) + TS_ASSERT(alg->isExecuted()) + API::MatrixWorkspace_sptr outWS = alg->getProperty("OutputWorkspace"); + TS_ASSERT(outWS) + for (size_t histI = 0; histI < nHist; ++histI) { + const auto &xs = ws->x(histI); + const auto &bkgYs = outWS->y(histI); + const auto &bkgEs = outWS->e(histI); + const auto &bkgXs = outWS->x(histI); + for (size_t binI = 0; binI < counts.size(); ++binI) { + // Number 4 in counts is heavily weighted by the small error. 
+ TS_ASSERT_DELTA(bkgYs[binI], 4, 1e-5) + TS_ASSERT_EQUALS(bkgEs[binI], 0) + TS_ASSERT_EQUALS(bkgXs[binI], xs[binI]) + } + } + } + + void test_costFuctionUnweightedLeastSquares() { + using namespace WorkspaceCreationHelper; + const size_t nHist{2}; + const HistogramData::Counts counts{0, 4, 0, 0}; + const HistogramData::BinEdges edges{0, 1, 2, 3, 4}; + API::MatrixWorkspace_sptr ws( + DataObjects::create<DataObjects::Workspace2D>( + nHist, HistogramData::Histogram(edges, counts)).release()); + auto alg = makeAlgorithm(); + TS_ASSERT_THROWS_NOTHING(alg->setProperty("InputWorkspace", ws)) + TS_ASSERT_THROWS_NOTHING(alg->setProperty("OutputWorkspace", "outputWS")) + TS_ASSERT_THROWS_NOTHING(alg->setProperty("Degree", 0)) + TS_ASSERT_THROWS_NOTHING( + alg->setProperty("CostFunction", "Unweighted least squares")) + TS_ASSERT_THROWS_NOTHING(alg->execute()) + TS_ASSERT(alg->isExecuted()) + API::MatrixWorkspace_sptr outWS = alg->getProperty("OutputWorkspace"); + TS_ASSERT(outWS) + // Unweighted fitting of zeroth order polynomial is equivalent to the mean. 
+ const double result = std::accumulate(counts.cbegin(), counts.cend(), 0.0) / + static_cast<double>(counts.size()); + for (size_t histI = 0; histI < nHist; ++histI) { + const auto &xs = ws->x(histI); + const auto &bkgYs = outWS->y(histI); + const auto &bkgEs = outWS->e(histI); + const auto &bkgXs = outWS->x(histI); + for (size_t binI = 0; binI < counts.size(); ++binI) { + TS_ASSERT_DELTA(bkgYs[binI], result, 1e-5) TS_ASSERT_EQUALS(bkgEs[binI], 0) TS_ASSERT_EQUALS(bkgXs[binI], xs[binI]) } @@ -176,7 +242,7 @@ public: const auto &bkgXs = outWS->x(0); const std::vector<double> expected{1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; for (size_t binI = 0; binI < nBin; ++binI) { - TS_ASSERT_DELTA(bkgYs[binI], expected[binI], 1e-12) + TS_ASSERT_DELTA(bkgYs[binI], expected[binI], 1e-10) TS_ASSERT_EQUALS(bkgEs[binI], 0) TS_ASSERT_EQUALS(bkgXs[binI], xs[binI]) } diff --git a/Framework/Algorithms/test/ChangePulsetime2Test.h b/Framework/Algorithms/test/ChangePulsetime2Test.h index 82e15ebe9c9e8371c318eb85c936015a3416f97f..00186cc1e5882b48330ddbbcf295a6761a2e9c19 100644 --- a/Framework/Algorithms/test/ChangePulsetime2Test.h +++ b/Framework/Algorithms/test/ChangePulsetime2Test.h @@ -5,6 +5,7 @@ #include "MantidKernel/Timer.h" #include <cxxtest/TestSuite.h> +#include "MantidAPI/Algorithm.tcc" #include "MantidAlgorithms/ChangePulsetime2.h" #include "MantidDataObjects/EventWorkspace.h" #include "MantidTestHelpers/WorkspaceCreationHelper.h" diff --git a/Framework/Algorithms/test/ExtractSpectra2Test.h b/Framework/Algorithms/test/ExtractSpectra2Test.h new file mode 100644 index 0000000000000000000000000000000000000000..87d6f8bb44f2b8d163efc15e35f9363ef6317293 --- /dev/null +++ b/Framework/Algorithms/test/ExtractSpectra2Test.h @@ -0,0 +1,124 @@ +#ifndef MANTID_ALGORITHMS_EXTRACTSPECTRA2TEST_H_ +#define MANTID_ALGORITHMS_EXTRACTSPECTRA2TEST_H_ + +#include <cxxtest/TestSuite.h> + +#include "MantidAlgorithms/ExtractSpectra2.h" +#include "MantidDataObjects/Workspace2D.h" +#include 
"MantidDataObjects/WorkspaceCreation.h" +#include "MantidAPI/AnalysisDataService.h" +#include "MantidIndexing/IndexInfo.h" + +#include "MantidTestHelpers/FakeObjects.h" +#include "MantidTestHelpers/ParallelAlgorithmCreation.h" +#include "MantidTestHelpers/ParallelRunner.h" + +using Mantid::Algorithms::ExtractSpectra2; +using namespace Mantid; +using namespace API; +using namespace Kernel; +using namespace DataObjects; +using namespace HistogramData; + +namespace { +void run_parallel(const Parallel::Communicator &comm) { + Indexing::IndexInfo indexInfo(1000, Parallel::StorageMode::Distributed, comm); + auto alg = ParallelTestHelpers::create<ExtractSpectra2>(comm); + alg->setProperty("InputWorkspace", create<Workspace2D>(indexInfo, Points(1))); + alg->setProperty("InputWorkspaceIndexSet", + "0-" + std::to_string(comm.size())); + TS_ASSERT_THROWS_NOTHING(alg->execute()); + MatrixWorkspace_const_sptr out = alg->getProperty("OutputWorkspace"); + TS_ASSERT_EQUALS(out->storageMode(), Parallel::StorageMode::Distributed); + if (0 % comm.size() == comm.rank()) { + TS_ASSERT_EQUALS(out->getNumberHistograms(), 2); + } else { + TS_ASSERT_EQUALS(out->getNumberHistograms(), 1); + } +} + +boost::shared_ptr<Workspace2D> createWorkspace() { + auto ws = create<Workspace2D>(5, Points(1)); + ws->setHistogram(0, Points{0.0}, Counts{1.0}); + ws->setHistogram(1, Points{1.0}, Counts{1.0}); + ws->setHistogram(2, Points{2.0}, Counts{1.0}); + ws->setHistogram(3, Points{3.0}, Counts{1.0}); + ws->setHistogram(4, Points{4.0}, Counts{1.0}); + return std::move(ws); +} +} + +class ExtractSpectra2Test : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static ExtractSpectra2Test *createSuite() { + return new ExtractSpectra2Test(); + } + static void destroySuite(ExtractSpectra2Test *suite) { delete suite; } + + void test_full() { + auto input = createWorkspace(); 
+ ExtractSpectra2 alg; + alg.initialize(); + alg.setProperty("InputWorkspace", std::move(input)); + alg.setProperty("OutputWorkspace", "out"); + alg.execute(); + auto ws = + AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>("out"); + TS_ASSERT_EQUALS(ws->getNumberHistograms(), 5); + } + + void test_reorder() { + auto input = createWorkspace(); + ExtractSpectra2 alg; + alg.initialize(); + alg.setProperty("InputWorkspace", std::move(input)); + alg.setProperty("InputWorkspaceIndexSet", "4,0-3"); + alg.setProperty("OutputWorkspace", "out"); + alg.execute(); + auto ws = + AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>("out"); + TS_ASSERT_EQUALS(ws->getNumberHistograms(), 5); + const auto &indexInfo = ws->indexInfo(); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), 5); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), 1); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(2), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(3), 3); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(4), 4); + TS_ASSERT_EQUALS(ws->getSpectrum(0).getSpectrumNo(), 5); + TS_ASSERT_EQUALS(ws->getSpectrum(1).getSpectrumNo(), 1); + TS_ASSERT_EQUALS(ws->getSpectrum(2).getSpectrumNo(), 2); + TS_ASSERT_EQUALS(ws->getSpectrum(3).getSpectrumNo(), 3); + TS_ASSERT_EQUALS(ws->getSpectrum(4).getSpectrumNo(), 4); + TS_ASSERT_EQUALS(ws->x(0)[0], 4.0); + TS_ASSERT_EQUALS(ws->x(1)[0], 0.0); + TS_ASSERT_EQUALS(ws->x(2)[0], 1.0); + TS_ASSERT_EQUALS(ws->x(3)[0], 2.0); + TS_ASSERT_EQUALS(ws->x(4)[0], 3.0); + } + + void test_extract() { + auto input = createWorkspace(); + ExtractSpectra2 alg; + alg.initialize(); + alg.setProperty("InputWorkspace", std::move(input)); + alg.setProperty("InputWorkspaceIndexSet", "4,1-2"); + alg.setProperty("OutputWorkspace", "out"); + alg.execute(); + auto ws = + AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>("out"); + TS_ASSERT_EQUALS(ws->getNumberHistograms(), 3); + const auto &indexInfo = ws->indexInfo(); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), 5); + 
TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(2), 3); + TS_ASSERT_EQUALS(ws->x(0)[0], 4.0); + TS_ASSERT_EQUALS(ws->x(1)[0], 1.0); + TS_ASSERT_EQUALS(ws->x(2)[0], 2.0); + } + + void test_parallel() { ParallelTestHelpers::runParallel(run_parallel); } +}; + +#endif /* MANTID_ALGORITHMS_EXTRACTSPECTRA2TEST_H_ */ diff --git a/Framework/Algorithms/test/ExtractSpectraTest.h b/Framework/Algorithms/test/ExtractSpectraTest.h index dc3654efe3abe35d66b9a4246cd8625747538c1b..43124749136f226a2e3e1032fa8a791e10bea25a 100644 --- a/Framework/Algorithms/test/ExtractSpectraTest.h +++ b/Framework/Algorithms/test/ExtractSpectraTest.h @@ -7,15 +7,69 @@ #include "MantidAPI/SpectrumInfo.h" #include "MantidAlgorithms/ExtractSpectra.h" #include "MantidDataObjects/EventWorkspace.h" +#include "MantidDataObjects/WorkspaceCreation.h" #include "MantidKernel/UnitFactory.h" +#include "MantidIndexing/IndexInfo.h" + #include "MantidTestHelpers/ComponentCreationHelper.h" +#include "MantidTestHelpers/ParallelAlgorithmCreation.h" +#include "MantidTestHelpers/ParallelRunner.h" #include "MantidTestHelpers/WorkspaceCreationHelper.h" using Mantid::Algorithms::ExtractSpectra; -using namespace Mantid::API; -using namespace Mantid::Kernel; -using namespace Mantid::DataObjects; using namespace Mantid; +using namespace API; +using namespace Kernel; +using namespace DataObjects; +using namespace HistogramData; + +namespace { +void run_parallel_DetectorList_fails(const Parallel::Communicator &comm) { + Indexing::IndexInfo indexInfo(1000, Parallel::StorageMode::Distributed, comm); + auto alg = ParallelTestHelpers::create<ExtractSpectra>(comm); + alg->setProperty("InputWorkspace", create<Workspace2D>(indexInfo, Points(1))); + alg->setProperty("DetectorList", "1"); + if (comm.size() == 1) { + TS_ASSERT_THROWS_NOTHING(alg->execute()); + } else { + TS_ASSERT_THROWS_EQUALS( + alg->execute(), const std::runtime_error &e, std::string(e.what()), + "MatrixWorkspace: Using 
getIndicesFromDetectorIDs in " + "a parallel run is most likely incorrect. Aborting."); + } +} + +void run_parallel_WorkspaceIndexList(const Parallel::Communicator &comm) { + Indexing::IndexInfo indexInfo(1000, Parallel::StorageMode::Distributed, comm); + auto alg = ParallelTestHelpers::create<ExtractSpectra>(comm); + alg->setProperty("InputWorkspace", create<Workspace2D>(indexInfo, Points(1))); + alg->setProperty("WorkspaceIndexList", "0-" + std::to_string(comm.size())); + TS_ASSERT_THROWS_NOTHING(alg->execute()); + MatrixWorkspace_const_sptr out = alg->getProperty("OutputWorkspace"); + TS_ASSERT_EQUALS(out->storageMode(), Parallel::StorageMode::Distributed); + if (comm.rank() == 0) { + TS_ASSERT_EQUALS(out->getNumberHistograms(), 2); + } else { + TS_ASSERT_EQUALS(out->getNumberHistograms(), 1); + } +} + +void run_parallel_WorkspaceIndexRange(const Parallel::Communicator &comm) { + Indexing::IndexInfo indexInfo(3 * comm.size(), + Parallel::StorageMode::Distributed, comm); + auto alg = ParallelTestHelpers::create<ExtractSpectra>(comm); + alg->setProperty("InputWorkspace", create<Workspace2D>(indexInfo, Points(1))); + alg->setProperty("StartWorkspaceIndex", std::to_string(comm.size() + 1)); + TS_ASSERT_THROWS_NOTHING(alg->execute()); + MatrixWorkspace_const_sptr out = alg->getProperty("OutputWorkspace"); + TS_ASSERT_EQUALS(out->storageMode(), Parallel::StorageMode::Distributed); + if (comm.rank() == 0) { + TS_ASSERT_EQUALS(out->getNumberHistograms(), 1); + } else { + TS_ASSERT_EQUALS(out->getNumberHistograms(), 2); + } +} +} class ExtractSpectraTest : public CxxTest::TestSuite { public: @@ -262,12 +316,7 @@ public: void test_invalid_x_range_event() { Parameters params("event"); params.setInvalidXRange(); - auto ws = runAlgorithm(params, true); - // this is a bit unexpected but at least no crash - TS_ASSERT_EQUALS(ws->getNumberHistograms(), nSpec); - TS_ASSERT_EQUALS(ws->blocksize(), 1); - TS_ASSERT_EQUALS(ws->x(0)[0], 2); - TS_ASSERT_EQUALS(ws->x(0)[1], 1); + auto 
ws = runAlgorithm(params, false); } void test_invalid_index_range_event() { @@ -385,6 +434,16 @@ public: auto ws = runAlgorithm(params, false); } + void test_parallel_DetectorList_fails() { + ParallelTestHelpers::runParallel(run_parallel_DetectorList_fails); + } + void test_parallel_WorkspaceIndexList() { + ParallelTestHelpers::runParallel(run_parallel_WorkspaceIndexList); + } + void test_parallel_WorkspaceIndexRange() { + ParallelTestHelpers::runParallel(run_parallel_WorkspaceIndexRange); + } + private: // ----------------------- helper methods ------------------------ diff --git a/Framework/Algorithms/test/HyspecScharpfCorrectionTest.h b/Framework/Algorithms/test/HyspecScharpfCorrectionTest.h new file mode 100644 index 0000000000000000000000000000000000000000..952f382003c0632c0fd0bcf1c1b41788f03baf60 --- /dev/null +++ b/Framework/Algorithms/test/HyspecScharpfCorrectionTest.h @@ -0,0 +1,72 @@ +#ifndef MANTID_ALGORITHMS_HYSPECSCHARPFCORRECTIONTEST_H_ +#define MANTID_ALGORITHMS_HYSPECSCHARPFCORRECTIONTEST_H_ + +#include <cxxtest/TestSuite.h> + +#include "MantidAlgorithms/HyspecScharpfCorrection.h" +#include "MantidTestHelpers/WorkspaceCreationHelper.h" + +using Mantid::Algorithms::HyspecScharpfCorrection; + +class HyspecScharpfCorrectionTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static HyspecScharpfCorrectionTest *createSuite() { + return new HyspecScharpfCorrectionTest(); + } + static void destroySuite(HyspecScharpfCorrectionTest *suite) { delete suite; } + + void test_Init() { + HyspecScharpfCorrection alg; + TS_ASSERT_THROWS_NOTHING(alg.initialize()) + TS_ASSERT(alg.isInitialized()) + } + + void test_exec() { + // Create test input + std::vector<double> L2 = {1.0}, polar = {M_PI_4}, azimuthal = {0.}; + auto inputWS = WorkspaceCreationHelper::createProcessedInelasticWS( + L2, polar, azimuthal, 30, -10, 20, 17.1); 
+ HyspecScharpfCorrection alg; + + alg.setChild(true); + alg.setRethrows(true); + TS_ASSERT_THROWS_NOTHING(alg.initialize()) + TS_ASSERT(alg.isInitialized()) + TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS)); + TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue( + "OutputWorkspace", "HyspecScharpfCorrectionOutput")); + TS_ASSERT_THROWS_NOTHING(alg.setProperty("PolarizationAngle", -11.0)); + TS_ASSERT_THROWS_NOTHING(alg.execute();); + TS_ASSERT(alg.isExecuted()); + + // Retrieve the workspace from the algorithm. + Mantid::API::MatrixWorkspace_sptr outputWS = + alg.getProperty("OutputWorkspace"); + TS_ASSERT(outputWS); + auto histo = outputWS->histogram(0); + auto x = histo.points(); + auto y = histo.y(); + for (size_t i = 0; i < x.size(); ++i) { + if (x[i] < 4) { + TS_ASSERT_LESS_THAN(y[i], 0); + } else if (x[i] < 6. || x[i] > 17.) { + TS_ASSERT_EQUALS(y[i], 0.); + } else { + TS_ASSERT_LESS_THAN(0, y[i]); + } + } + // test one value, say DeltaE=6.5 + double kikf = std::sqrt(1. - 6.5 / 17.1); + auto alpha = + std::atan2(-kikf * std::sin(M_PI_4), 1. - kikf * std::cos(M_PI_4)) + + 11. * M_PI / 180.; + TS_ASSERT_DELTA(x[16], 6.5, 1e-10); + // note that it does the correction factor as a float + // as it is a common code with events + TS_ASSERT_DELTA(y[16], 1. / std::cos(2. 
* alpha), 1e-6); + } +}; + +#endif /* MANTID_ALGORITHMS_HYSPECSCHARPFCORRECTIONTEST_H_ */ diff --git a/Framework/Algorithms/test/MonitorEfficiencyCorUserTest.h b/Framework/Algorithms/test/MonitorEfficiencyCorUserTest.h index 2827781ef6184f870eadf4b34bf0e5626205be1b..1ed86a0bd8d3593b480b48ec4337279146873bd8 100644 --- a/Framework/Algorithms/test/MonitorEfficiencyCorUserTest.h +++ b/Framework/Algorithms/test/MonitorEfficiencyCorUserTest.h @@ -94,7 +94,7 @@ public: private: double m_Ei; - int m_monitor_counts; + double m_monitor_counts; const std::string m_inWSName, m_outWSName; MonitorEfficiencyCorUser alg; @@ -115,10 +115,8 @@ private: } dataws->getAxis(0)->setUnit("TOF"); - dataws->mutableRun().addProperty("Ei", - boost::lexical_cast<std::string>(m_Ei)); - dataws->mutableRun().addProperty( - "monitor_counts", boost::lexical_cast<std::string>(m_monitor_counts)); + dataws->mutableRun().addProperty("Ei", m_Ei); + dataws->mutableRun().addProperty("monitor_counts", m_monitor_counts); dataws->instrumentParameters().addString( dataws->getInstrument()->getChild(0).get(), "formula_mon_eff", @@ -143,10 +141,8 @@ public: 100000, 2000, false, false, true, "TOFTOF"); input->getAxis(0)->setUnit("TOF"); - input->mutableRun().addProperty("Ei", - boost::lexical_cast<std::string>(3.27)); - input->mutableRun().addProperty("monitor_counts", - boost::lexical_cast<std::string>(1000)); + input->mutableRun().addProperty("Ei", 3.27); + input->mutableRun().addProperty("monitor_counts", 1000.0); input->instrumentParameters().addString( input->getInstrument()->getChild(0).get(), "formula_mon_eff", diff --git a/Framework/DataHandling/CMakeLists.txt b/Framework/DataHandling/CMakeLists.txt index 454d0cf521c364e1ab32166d96498a8c360bfa3a..748db0f0be2a7540e5d02866fa120c52c6503247 100644 --- a/Framework/DataHandling/CMakeLists.txt +++ b/Framework/DataHandling/CMakeLists.txt @@ -12,6 +12,7 @@ set ( SRC_FILES src/DataBlock.cpp src/DataBlockComposite.cpp src/DataBlockGenerator.cpp + 
src/DefaultEventLoader.cpp src/DefineGaugeVolume.cpp src/DeleteTableRows.cpp src/DetermineChunking.cpp @@ -33,6 +34,7 @@ set ( SRC_FILES src/LoadAscii.cpp src/LoadAscii2.cpp src/LoadBBY.cpp + src/LoadBankFromDiskTask.cpp src/LoadCalFile.cpp src/LoadCanSAS1D.cpp src/LoadCanSAS1D2.cpp @@ -43,6 +45,7 @@ set ( SRC_FILES src/LoadDspacemap.cpp src/LoadEmptyInstrument.cpp src/LoadEventNexus.cpp + src/LoadEventNexusIndexSetup.cpp src/LoadEventPreNexus2.cpp src/LoadFITS.cpp src/LoadFullprofResolution.cpp @@ -50,7 +53,7 @@ set ( SRC_FILES src/LoadGSS.cpp src/LoadHelper.cpp src/LoadIDFFromNexus.cpp - src/LoadILLDiffraction.cpp + src/LoadILLDiffraction.cpp src/LoadILLIndirect2.cpp src/LoadILLReflectometry.cpp src/LoadILLSANS.cpp @@ -93,8 +96,8 @@ set ( SRC_FILES src/LoadRawBin0.cpp src/LoadRawHelper.cpp src/LoadRawSpectrum0.cpp - src/LoadSINQFocus.cpp src/LoadSESANS.cpp + src/LoadSINQFocus.cpp src/LoadSNSspec.cpp src/LoadSPE.cpp src/LoadSampleDetailsFromRaw.cpp @@ -114,6 +117,7 @@ set ( SRC_FILES src/MoveInstrumentComponent.cpp src/NexusTester.cpp src/PDLoadCharacterizations.cpp + src/ParallelEventLoader.cpp src/PatchBBY.cpp src/ProcessBankData.cpp src/RawFileInfo.cpp @@ -186,6 +190,7 @@ set ( INC_FILES inc/MantidDataHandling/DataBlock.h inc/MantidDataHandling/DataBlockComposite.h inc/MantidDataHandling/DataBlockGenerator.h + inc/MantidDataHandling/DefaultEventLoader.h inc/MantidDataHandling/DefineGaugeVolume.h inc/MantidDataHandling/DeleteTableRows.h inc/MantidDataHandling/DetermineChunking.h @@ -207,6 +212,7 @@ set ( INC_FILES inc/MantidDataHandling/LoadAscii.h inc/MantidDataHandling/LoadAscii2.h inc/MantidDataHandling/LoadBBY.h + inc/MantidDataHandling/LoadBankFromDiskTask.h inc/MantidDataHandling/LoadCalFile.h inc/MantidDataHandling/LoadCanSAS1D.h inc/MantidDataHandling/LoadCanSAS1D2.h @@ -217,6 +223,7 @@ set ( INC_FILES inc/MantidDataHandling/LoadDspacemap.h inc/MantidDataHandling/LoadEmptyInstrument.h inc/MantidDataHandling/LoadEventNexus.h + 
inc/MantidDataHandling/LoadEventNexusIndexSetup.h inc/MantidDataHandling/LoadEventPreNexus2.h inc/MantidDataHandling/LoadFITS.h inc/MantidDataHandling/LoadFullprofResolution.h @@ -224,7 +231,7 @@ set ( INC_FILES inc/MantidDataHandling/LoadGSS.h inc/MantidDataHandling/LoadHelper.h inc/MantidDataHandling/LoadIDFFromNexus.h - inc/MantidDataHandling/LoadILLDiffraction.h + inc/MantidDataHandling/LoadILLDiffraction.h inc/MantidDataHandling/LoadILLIndirect2.h inc/MantidDataHandling/LoadILLReflectometry.h inc/MantidDataHandling/LoadILLSANS.h @@ -284,6 +291,7 @@ set ( INC_FILES inc/MantidDataHandling/NXcanSASDefinitions.h inc/MantidDataHandling/NexusTester.h inc/MantidDataHandling/PDLoadCharacterizations.h + inc/MantidDataHandling/ParallelEventLoader.h inc/MantidDataHandling/PatchBBY.h inc/MantidDataHandling/ProcessBankData.h inc/MantidDataHandling/RawFileInfo.h @@ -386,6 +394,7 @@ set ( TEST_FILES LoadDiffCalTest.h LoadDspacemapTest.h LoadEmptyInstrumentTest.h + LoadEventNexusIndexSetupTest.h LoadEventNexusTest.h LoadEventPreNexus2Test.h LoadFITSTest.h @@ -394,7 +403,7 @@ set ( TEST_FILES LoadGSSTest.h LoadIDFFromNexusTest.h LoadILLDiffractionTest.h - LoadILLIndirect2Test.h + LoadILLIndirect2Test.h LoadILLReflectometryTest.h LoadILLSANSTest.h LoadILLTOF2Test.h diff --git a/Framework/DataHandling/inc/MantidDataHandling/CompressEvents.h b/Framework/DataHandling/inc/MantidDataHandling/CompressEvents.h index e9f1ea84fdb3b94fb84dae8ebd9e8e43bfc6e2f4..aed79f9023a03f767d8f73a7e579ae345568cabe 100644 --- a/Framework/DataHandling/inc/MantidDataHandling/CompressEvents.h +++ b/Framework/DataHandling/inc/MantidDataHandling/CompressEvents.h @@ -1,10 +1,7 @@ #ifndef MANTID_DATAHANDLING_COMPRESSEVENTS_H_ #define MANTID_DATAHANDLING_COMPRESSEVENTS_H_ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- -#include "MantidAPI/Algorithm.h" +#include "MantidAPI/ParallelAlgorithm.h" 
namespace Mantid { namespace DataHandling { @@ -43,7 +40,7 @@ namespace DataHandling { File change history is stored at: <https://github.com/mantidproject/mantid>. Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class DLLExport CompressEvents : public API::Algorithm { +class DLLExport CompressEvents : public API::ParallelAlgorithm { public: /// Algorithm's name for identification overriding a virtual method const std::string name() const override { return "CompressEvents"; }; diff --git a/Framework/DataHandling/inc/MantidDataHandling/DefaultEventLoader.h b/Framework/DataHandling/inc/MantidDataHandling/DefaultEventLoader.h new file mode 100644 index 0000000000000000000000000000000000000000..6a2e46191a1128e1f8c0afbff3726894ac4358ee --- /dev/null +++ b/Framework/DataHandling/inc/MantidDataHandling/DefaultEventLoader.h @@ -0,0 +1,172 @@ +#ifndef MANTID_DATAHANDLING_DEFAULTEVENTLOADER_H_ +#define MANTID_DATAHANDLING_DEFAULTEVENTLOADER_H_ + +#include "MantidDataHandling/DllConfig.h" +#include "MantidDataHandling/EventWorkspaceCollection.h" +#include "MantidAPI/Axis.h" + +class BankPulseTimes; + +namespace Mantid { +namespace DataHandling { +class LoadEventNexus; + +/** Helper class for LoadEventNexus that is specific to the current default + loading code for NXevent_data entries in Nexus files, in particular + LoadBankFromDiskTask and ProcessBankData. + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_DATAHANDLING_DLL DefaultEventLoader { +public: + static void + load(LoadEventNexus *alg, EventWorkspaceCollection &ws, bool haveWeights, + bool event_id_is_spec, std::vector<std::string> bankNames, + const std::vector<int> &periodLog, const std::string &classType, + std::vector<std::size_t> bankNumEvents, const bool oldNeXusFileNames, + const bool precount, const int chunk, const int totalChunks); + + /// Flag for dealing with a simulated file + bool m_haveWeights; + + /// True if the event_id is spectrum no not pixel ID + bool event_id_is_spec; + + /// whether or not to launch multiple ProcessBankData jobs per bank + bool splitProcessing; + + /// Do we pre-count the # of events in each pixel ID? + bool precount; + + /// Offset in the pixelID_to_wi_vector to use. + detid_t pixelID_to_wi_offset; + + /// Maximum (inclusive) event ID possible for this instrument + int32_t eventid_max{0}; + + /// chunk number + int chunk; + /// number of chunks + int totalChunks; + /// for multiple chunks per bank + int firstChunkForBank; + /// number of chunks per bank + size_t eventsPerChunk; + + LoadEventNexus *alg; + EventWorkspaceCollection &m_ws; + + /// Vector where index = event_id; value = ptr to std::vector<TofEvent> in the + /// event list. + std::vector<std::vector<std::vector<Mantid::Types::Event::TofEvent> *>> + eventVectors; + + /// Vector where index = event_id; value = ptr to std::vector<WeightedEvent> + /// in the event list. 
+ std::vector<std::vector<std::vector<Mantid::DataObjects::WeightedEvent> *>> + weightedEventVectors; + + /// Vector where (index = pixel ID+pixelID_to_wi_offset), value = workspace + /// index) + std::vector<size_t> pixelID_to_wi_vector; + + /// One entry of pulse times for each preprocessor + std::vector<boost::shared_ptr<BankPulseTimes>> m_bankPulseTimes; + +private: + DefaultEventLoader(LoadEventNexus *alg, EventWorkspaceCollection &ws, + bool haveWeights, bool event_id_is_spec, + const size_t numBanks, const bool precount, + const int chunk, const int totalChunks); + std::pair<size_t, size_t> + setupChunking(std::vector<std::string> &bankNames, + std::vector<std::size_t> &bankNumEvents); + /// Map detector IDs to event lists. + template <class T> + void makeMapToEventLists(std::vector<std::vector<T>> &vectors); +}; + +/** Generate a look-up table where the index = the pixel ID of an event +* and the value = a pointer to the EventList in the workspace +* @param vectors :: the array to create the map on +*/ +template <class T> +void DefaultEventLoader::makeMapToEventLists( + std::vector<std::vector<T>> &vectors) { + vectors.resize(m_ws.nPeriods()); + if (event_id_is_spec) { + // Find max spectrum no + auto *ax1 = m_ws.getAxis(1); + specnum_t maxSpecNo = + -std::numeric_limits<specnum_t>::max(); // So that any number will be + // greater than this + for (size_t i = 0; i < ax1->length(); i++) { + specnum_t spec = ax1->spectraNo(i); + if (spec > maxSpecNo) + maxSpecNo = spec; + } + + // These are used by the bank loader to figure out where to put the events + // The index of eventVectors is a spectrum number so it is simply resized to + // the maximum + // possible spectrum number + eventid_max = maxSpecNo; + for (size_t i = 0; i < vectors.size(); ++i) { + vectors[i].resize(maxSpecNo + 1, nullptr); + } + for (size_t period = 0; period < m_ws.nPeriods(); ++period) { + for (size_t i = 0; i < m_ws.getNumberHistograms(); ++i) { + const auto &spec = m_ws.getSpectrum(i); 
+ getEventsFrom(m_ws.getSpectrum(i, period), + vectors[period][spec.getSpectrumNo()]); + } + } + } else { + // To avoid going out of range in the vector, this is the MAX index that can + // go into it + eventid_max = static_cast<int32_t>(pixelID_to_wi_vector.size()) + + pixelID_to_wi_offset; + + // Make an array where index = pixel ID + // Set the value to NULL by default + for (size_t i = 0; i < vectors.size(); ++i) { + vectors[i].resize(eventid_max + 1, nullptr); + } + + for (size_t j = size_t(pixelID_to_wi_offset); + j < pixelID_to_wi_vector.size(); j++) { + size_t wi = pixelID_to_wi_vector[j]; + // Save a POINTER to the vector + if (wi < m_ws.getNumberHistograms()) { + for (size_t period = 0; period < m_ws.nPeriods(); ++period) { + getEventsFrom(m_ws.getSpectrum(wi, period), + vectors[period][j - pixelID_to_wi_offset]); + } + } + } + } +} + +} // namespace DataHandling +} // namespace Mantid + +#endif /* MANTID_DATAHANDLING_DEFAULTEVENTLOADER_H_ */ diff --git a/Framework/DataHandling/inc/MantidDataHandling/EventWorkspaceCollection.h b/Framework/DataHandling/inc/MantidDataHandling/EventWorkspaceCollection.h index 5a1ad0a08dad00488ac2a6f0497ae3298e212321..736817697cba0cd670b81facece7e52c69ef3bb8 100644 --- a/Framework/DataHandling/inc/MantidDataHandling/EventWorkspaceCollection.h +++ b/Framework/DataHandling/inc/MantidDataHandling/EventWorkspaceCollection.h @@ -13,6 +13,9 @@ #include <memory> namespace Mantid { +namespace Indexing { +class IndexInfo; +} namespace DataHandling { /** EventWorkspaceCollection : Collection of EventWorspaces to give @@ -96,13 +99,11 @@ public: Types::Core::DateAndTime getFirstPulseTime() const; void setAllX(const HistogramData::BinEdges &x); size_t getNumberEvents() const; - void resizeTo(const size_t size); - void padSpectra(const std::vector<int32_t> &padding); + void setIndexInfo(const Indexing::IndexInfo &indexInfo); void setInstrument(const Geometry::Instrument_const_sptr &inst); void setMonitorWorkspace(const 
boost::shared_ptr<API::MatrixWorkspace> &monitorWS); void updateSpectraUsing(const API::SpectrumDetectorMapping &map); - void populateInstrumentParameters(); void setTitle(std::string title); void applyFilter(boost::function<void(API::MatrixWorkspace_sptr)> func); virtual bool threadSafe() const; diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadBankFromDiskTask.h b/Framework/DataHandling/inc/MantidDataHandling/LoadBankFromDiskTask.h new file mode 100644 index 0000000000000000000000000000000000000000..7e97544392a7bf027ea75f438e178d90d507101a --- /dev/null +++ b/Framework/DataHandling/inc/MantidDataHandling/LoadBankFromDiskTask.h @@ -0,0 +1,104 @@ +#ifndef MANTID_DATAHANDLING_LOADBANKFROMDISKTASK_H_ +#define MANTID_DATAHANDLING_LOADBANKFROMDISKTASK_H_ + +#include "MantidDataHandling/DllConfig.h" +#include "MantidAPI/Progress.h" +#include "MantidKernel/Task.h" +#include "MantidKernel/ThreadScheduler.h" + +#include <nexus/NeXusFile.hpp> + +class BankPulseTimes; + +namespace Mantid { +namespace DataHandling { +class DefaultEventLoader; + +/** This task does the disk IO from loading the NXS file, and so will be on a + disk IO mutex + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_DATAHANDLING_DLL LoadBankFromDiskTask : public Kernel::Task { + +public: + LoadBankFromDiskTask(DefaultEventLoader &loader, + const std::string &entry_name, + const std::string &entry_type, + const std::size_t numEvents, + const bool oldNeXusFileNames, API::Progress *prog, + boost::shared_ptr<std::mutex> ioMutex, + Kernel::ThreadScheduler &scheduler, + const std::vector<int> &framePeriodNumbers); + + void run() override; + +private: + void loadPulseTimes(::NeXus::File &file); + void loadEventIndex(::NeXus::File &file, std::vector<uint64_t> &event_index); + void prepareEventId(::NeXus::File &file, size_t &start_event, + size_t &stop_event, std::vector<uint64_t> &event_index); + void loadEventId(::NeXus::File &file); + void loadTof(::NeXus::File &file); + void loadEventWeights(::NeXus::File &file); + int64_t recalculateDataSize(const int64_t &size); + + /// Algorithm being run + DefaultEventLoader &m_loader; + /// NXS path to bank + std::string entry_name; + /// NXS type + std::string entry_type; + /// Progress reporting + API::Progress *prog; + /// ThreadScheduler running this task + Kernel::ThreadScheduler &scheduler; + /// Object with the pulse times for this bank + boost::shared_ptr<BankPulseTimes> thisBankPulseTimes; + /// Did we get an error in loading + bool m_loadError; + /// Old names in the file? 
+ bool m_oldNexusFileNames; + /// Index to load start at in the file + std::vector<int> m_loadStart; + /// How much to load in the file + std::vector<int> m_loadSize; + /// Event pixel ID data + uint32_t *m_event_id; + /// Minimum pixel ID in this data + uint32_t m_min_id; + /// Maximum pixel ID in this data + uint32_t m_max_id; + /// TOF data + float *m_event_time_of_flight; + /// Flag for simulated data + bool m_have_weight; + /// Event weights + float *m_event_weight; + /// Frame period numbers + const std::vector<int> m_framePeriodNumbers; +}; // END-DEF-CLASS LoadBankFromDiskTask + +} // namespace DataHandling +} // namespace Mantid + +#endif /* MANTID_DATAHANDLING_LOADBANKFROMDISKTASK_H_ */ diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadEventNexus.h b/Framework/DataHandling/inc/MantidDataHandling/LoadEventNexus.h index 1c0bf4af3fa9cf7c773292f6439563e2886ad6e9..3918e0a2e373ed6a909e79b7f6b1a69bcfb368c0 100644 --- a/Framework/DataHandling/inc/MantidDataHandling/LoadEventNexus.h +++ b/Framework/DataHandling/inc/MantidDataHandling/LoadEventNexus.h @@ -1,9 +1,6 @@ #ifndef MANTID_DATAHANDLING_LOADEVENTNEXUS_H_ #define MANTID_DATAHANDLING_LOADEVENTNEXUS_H_ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- #include "MantidAPI/IFileLoader.h" #include "MantidAPI/WorkspaceGroup.h" #include "MantidDataHandling/BankPulseTimes.h" @@ -31,7 +28,6 @@ #include <numeric> namespace Mantid { - namespace DataHandling { /** @class LoadEventNexus LoadEventNexus.h Nexus/LoadEventNexus.h @@ -91,10 +87,6 @@ public: /// Returns a confidence value that this algorithm can load a file int confidence(Kernel::NexusDescriptor &descriptor) const override; - /** Sets whether the pixel counts will be pre-counted. - * @param value :: true if you want to precount. 
*/ - void setPrecount(bool value) { precount = value; } - template <typename T> static boost::shared_ptr<BankPulseTimes> runLoadNexusLogs( const std::string &nexusfilename, T localWorkspace, Algorithm &alg, @@ -142,8 +134,6 @@ public: /// Filter by a maximum time-of-flight double filter_tof_max; - /// Spectra list to load - std::vector<int32_t> m_specList; /// Minimum spectrum to load int32_t m_specMin; /// Maximum spectrum to load @@ -153,14 +143,6 @@ public: Mantid::Types::Core::DateAndTime filter_time_start; /// Filter by stop time Mantid::Types::Core::DateAndTime filter_time_stop; - /// chunk number - int chunk; - /// number of chunks - int totalChunks; - /// for multiple chunks per bank - int firstChunkForBank; - /// number of chunks per bank - size_t eventsPerChunk; /// Mutex protecting tof limits std::mutex m_tofMutex; @@ -176,35 +158,9 @@ public: /// the IDF size_t discarded_events; - /// Do we pre-count the # of events in each pixel ID? - bool precount; - /// Tolerance for CompressEvents; use -1 to mean don't compress. double compressTolerance; - /// Pointer to the vector of events - typedef std::vector<Mantid::Types::Event::TofEvent> *EventVector_pt; - - /// Vector where index = event_id; value = ptr to std::vector<TofEvent> in the - /// event list. - std::vector<std::vector<EventVector_pt>> eventVectors; - - /// Mutex to protect eventVectors from each task - std::recursive_mutex m_eventVectorMutex; - - /// Maximum (inclusive) event ID possible for this instrument - int32_t eventid_max; - - /// Vector where (index = pixel ID+pixelID_to_wi_offset), value = workspace - /// index) - std::vector<size_t> pixelID_to_wi_vector; - - /// Offset in the pixelID_to_wi_vector to use. - detid_t pixelID_to_wi_offset; - - /// One entry of pulse times for each preprocessor - std::vector<boost::shared_ptr<BankPulseTimes>> m_bankPulseTimes; - /// Pulse times for ALL banks, taken from proton_charge log. 
boost::shared_ptr<BankPulseTimes> m_allBanksPulseTimes; @@ -212,19 +168,10 @@ public: std::string m_top_entry_name; ::NeXus::File *m_file; - /// whether or not to launch multiple ProcessBankData jobs per bank - bool splitProcessing; - - /// Flag for dealing with a simulated file - bool m_haveWeights; - - /// Pointer to the vector of weighted events - typedef std::vector<Mantid::DataObjects::WeightedEvent> * - WeightedEventVector_pt; - - /// Vector where index = event_id; value = ptr to std::vector<WeightedEvent> - /// in the event list. - std::vector<std::vector<WeightedEventVector_pt>> weightedEventVectors; +protected: + Parallel::ExecutionMode getParallelExecutionMode( + const std::map<std::string, Parallel::StorageMode> &storageModes) + const override; private: /// Intialisation code @@ -233,14 +180,12 @@ private: /// Execution code void exec() override; - DataObjects::EventWorkspace_sptr createEmptyEventWorkspace(); + bool canUseParallelLoader(const bool haveWeights, + const bool oldNeXusFileNames, + const std::string &classType) const; - /// Map detector IDs to event lists. 
- template <class T> - void makeMapToEventLists(std::vector<std::vector<T>> &vectors); + DataObjects::EventWorkspace_sptr createEmptyEventWorkspace(); - void createWorkspaceIndexMaps(const bool monitors, - const std::vector<std::string> &bankNames); void loadEvents(API::Progress *const prog, const bool monitors); void createSpectraMapping( const std::string &nxsfile, const bool monitorsOnly, @@ -254,8 +199,8 @@ private: void setTimeFilters(const bool monitors); /// Load a spectra mapping from the given file - bool loadSpectraMapping(const std::string &filename, const bool monitorsOnly, - const std::string &entry_name); + std::unique_ptr<std::pair<std::vector<int32_t>, std::vector<int32_t>>> + loadISISVMSSpectraMapping(const std::string &entry_name); /// ISIS specific methods for dealing with wide events void loadTimeOfFlight(EventWorkspaceCollection_sptr WS, @@ -268,9 +213,6 @@ private: size_t end_wi = 0); template <typename T> void filterDuringPause(T workspace); - // Validate the optional spectra input properties and initialize m_specList - void createSpectraList(int32_t min, int32_t max); - /// Set the top entry field name void setTopEntryName(); diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadEventNexusIndexSetup.h b/Framework/DataHandling/inc/MantidDataHandling/LoadEventNexusIndexSetup.h new file mode 100644 index 0000000000000000000000000000000000000000..c7030c76870b03452756d31300a61896a97e48d5 --- /dev/null +++ b/Framework/DataHandling/inc/MantidDataHandling/LoadEventNexusIndexSetup.h @@ -0,0 +1,72 @@ +#ifndef MANTID_DATAHANDLING_LOADEVENTNEXUSINDEXSETUP_H_ +#define MANTID_DATAHANDLING_LOADEVENTNEXUSINDEXSETUP_H_ + +#include "MantidDataHandling/DllConfig.h" +#include "MantidAPI/MatrixWorkspace.h" +#include "MantidIndexing/IndexInfo.h" +#include "MantidParallel/Communicator.h" + +namespace Mantid { +namespace DataHandling { + +/** Helper for LoadEventNexus dealing with setting up indices (spectrum numbers + an detector ID mapping) for 
workspaces. + + Filters set via `min`, `max`, and `range` are used by LoadEventNexus for + selecting from the `event_id` entry in Nexus files. This may either correspond + to a spectrum number (ISIS) or a detector ID. Throughout this class IndexInfo + is used for filtering and thus the spectrum number is set to the requested + event_id ranges. The final returned IndexInfo will however have spectrum + numbers that, in general, are not the event_ids (except for ISIS). + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_DATAHANDLING_DLL LoadEventNexusIndexSetup { +public: + LoadEventNexusIndexSetup( + API::MatrixWorkspace_const_sptr instrumentWorkspace, const int32_t min, + const int32_t max, const std::vector<int32_t> range, + const Parallel::Communicator &communicator = Parallel::Communicator()); + + std::pair<int32_t, int32_t> eventIDLimits() const; + + Indexing::IndexInfo makeIndexInfo(); + Indexing::IndexInfo makeIndexInfo(const std::vector<std::string> &bankNames); + Indexing::IndexInfo + makeIndexInfo(const std::pair<std::vector<int32_t>, std::vector<int32_t>> & + spectrumDetectorMapping, + const bool monitorsOnly); + +private: + Indexing::IndexInfo filterIndexInfo(const Indexing::IndexInfo &indexInfo); + + const API::MatrixWorkspace_const_sptr m_instrumentWorkspace; + int32_t m_min; + int32_t m_max; + std::vector<int32_t> m_range; + const Parallel::Communicator m_communicator; +}; + +} // namespace DataHandling +} // namespace Mantid + +#endif /* MANTID_DATAHANDLING_LOADEVENTNEXUSINDEXSETUP_H_ */ diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadISISNexus2.h b/Framework/DataHandling/inc/MantidDataHandling/LoadISISNexus2.h index fa4c6c961299213cf19aa2ff19d569929549cbb5..2954b9871a857df3bbdb390ee345ad5fa9332fcc 100644 --- a/Framework/DataHandling/inc/MantidDataHandling/LoadISISNexus2.h +++ b/Framework/DataHandling/inc/MantidDataHandling/LoadISISNexus2.h @@ -147,6 +147,9 @@ private: void buildSpectraInd2SpectraNumMap(bool range_supplied, bool hasSpectraList, DataBlockComposite &dataBlockComposite); + /// Check if any of the spectra block ranges overlap + void checkOverlappingSpectraRange(); + /// The name and path of the input file std::string m_filename; /// The instrument name from Nexus diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadInstrument.h 
b/Framework/DataHandling/inc/MantidDataHandling/LoadInstrument.h index 8c0f9ef28ce26aacfa4a8e5b8ec8cde0d9f13c3b..4e8fd0e34d77100ae666936e53df5bd4523a8977 100644 --- a/Framework/DataHandling/inc/MantidDataHandling/LoadInstrument.h +++ b/Framework/DataHandling/inc/MantidDataHandling/LoadInstrument.h @@ -1,17 +1,11 @@ #ifndef MANTID_DATAHANDLING_LOADINSTRUMENT_H_ #define MANTID_DATAHANDLING_LOADINSTRUMENT_H_ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- -#include "MantidAPI/Algorithm.h" #include "MantidAPI/ExperimentInfo.h" +#include "MantidAPI/ParallelAlgorithm.h" #include <mutex> -//---------------------------------------------------------------------- -// Forward declarations -//---------------------------------------------------------------------- /// @cond Exclude from doxygen documentation namespace Poco { namespace XML { @@ -75,10 +69,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>. 
File change history is stored at: <https://github.com/mantidproject/mantid> */ -class DLLExport LoadInstrument : public API::Algorithm { +class DLLExport LoadInstrument : public API::ParallelAlgorithm { public: - /// Default constructor - LoadInstrument(); /// Algorithm's name for identification overriding a virtual method const std::string name() const override { return "LoadInstrument"; }; /// Summary of algorithms purpose diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadNexusLogs.h b/Framework/DataHandling/inc/MantidDataHandling/LoadNexusLogs.h index 6f3c70e5bef637a7a4227cc46866a47d647e9b37..74d28ae43ba0cd2dcb7426cf376adb4b6bf0fab3 100644 --- a/Framework/DataHandling/inc/MantidDataHandling/LoadNexusLogs.h +++ b/Framework/DataHandling/inc/MantidDataHandling/LoadNexusLogs.h @@ -1,16 +1,10 @@ #ifndef MANTID_DATAHANDLING_LOADNEXUSLOGS_H_ #define MANTID_DATAHANDLING_LOADNEXUSLOGS_H_ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- -#include "MantidAPI/Algorithm.h" +#include "MantidAPI/ParallelAlgorithm.h" #include <nexus/NeXusFile.hpp> namespace Mantid { -//---------------------------------------------------------------------- -// Forward declaration -//---------------------------------------------------------------------- namespace Kernel { class Property; } @@ -54,7 +48,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>. File change history is stored at: <https://github.com/mantidproject/mantid>. 
Code Documentation is available at: <http://doxygen.mantidproject.org> */ -class DLLExport LoadNexusLogs : public API::Algorithm { +class DLLExport LoadNexusLogs : public API::ParallelAlgorithm { public: /// Default constructor LoadNexusLogs(); diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadParameterFile.h b/Framework/DataHandling/inc/MantidDataHandling/LoadParameterFile.h index f1faa80c7e01af17054665b9ad519cad0e824c9d..6c2ff8a5cb09ab2fca670993696330a91eef932c 100644 --- a/Framework/DataHandling/inc/MantidDataHandling/LoadParameterFile.h +++ b/Framework/DataHandling/inc/MantidDataHandling/LoadParameterFile.h @@ -1,14 +1,8 @@ #ifndef MANTID_DATAHANDLING_LOADPARAMETERFILE_H_ #define MANTID_DATAHANDLING_LOADPARAMETERFILE_H_ -//---------------------------------------------------------------------- -// Includes -//---------------------------------------------------------------------- -#include "MantidAPI/Algorithm.h" - -//---------------------------------------------------------------------- -// Forward declaration -//---------------------------------------------------------------------- +#include "MantidAPI/ParallelAlgorithm.h" + /// @cond Exclude from doxygen documentation namespace Poco { namespace XML { @@ -68,10 +62,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>. 
File change history is stored at: <https://github.com/mantidproject/mantid> */ -class DLLExport LoadParameterFile : public API::Algorithm { +class DLLExport LoadParameterFile : public API::ParallelAlgorithm { public: - /// Default constructor - LoadParameterFile(); /// Algorithm's name for identification overriding a virtual method const std::string name() const override { return "LoadParameterFile"; }; /// Summary of algorithms purpose diff --git a/Framework/DataHandling/inc/MantidDataHandling/MaskDetectors.h b/Framework/DataHandling/inc/MantidDataHandling/MaskDetectors.h index c251c1ec12b7947d463ecccbc5c8898158c4ff54..eeb72c9fea0682d8bc75a9dd9b57c3ff71e00040 100644 --- a/Framework/DataHandling/inc/MantidDataHandling/MaskDetectors.h +++ b/Framework/DataHandling/inc/MantidDataHandling/MaskDetectors.h @@ -99,10 +99,11 @@ private: const RangeInfo &rangeInfo); void execPeaks(DataObjects::PeaksWorkspace_sptr WS); - void fillIndexListFromSpectra( - std::vector<size_t> &indexList, - const std::vector<Indexing::SpectrumNumber> &spectraList, - const API::MatrixWorkspace_sptr WS, const RangeInfo &range_info); + void + fillIndexListFromSpectra(std::vector<size_t> &indexList, + std::vector<Indexing::SpectrumNumber> spectraList, + const API::MatrixWorkspace_sptr WS, + const RangeInfo &range_info); void appendToDetectorListFromComponentList( std::vector<detid_t> &detectorList, const std::vector<std::string> &componentList, diff --git a/Framework/DataHandling/inc/MantidDataHandling/ParallelEventLoader.h b/Framework/DataHandling/inc/MantidDataHandling/ParallelEventLoader.h new file mode 100644 index 0000000000000000000000000000000000000000..1af7bf563001db7dc80a497713b313f1658cd856 --- /dev/null +++ b/Framework/DataHandling/inc/MantidDataHandling/ParallelEventLoader.h @@ -0,0 +1,54 @@ +#ifndef MANTID_DATAHANDLING_PARALLELEVENTLOADER_H_ +#define MANTID_DATAHANDLING_PARALLELEVENTLOADER_H_ + +#include <string> +#include <vector> + +#include "MantidDataHandling/DllConfig.h" + 
+namespace Mantid { +namespace DataObjects { +class EventWorkspace; +} +namespace DataHandling { + +/** Loader for event data from Nexus files with parallelism based on multiple + processes (MPI) for performance. This class provides integration of the low + level loader component Parallel::IO::EventLoader with higher level concepts + such as DataObjects::EventWorkspace and the instrument. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_DATAHANDLING_DLL ParallelEventLoader { +public: + static void load(DataObjects::EventWorkspace &ws, const std::string &filename, + const std::string &groupName, + const std::vector<std::string> &bankNames); +}; + +} // namespace DataHandling +} // namespace Mantid + +#endif /* MANTID_DATAHANDLING_PARALLELEVENTLOADER_H_ */ diff --git a/Framework/DataHandling/inc/MantidDataHandling/ProcessBankData.h b/Framework/DataHandling/inc/MantidDataHandling/ProcessBankData.h index 9a8cb89506d9157474adc0e4f7e90c09630e15af..1771d84b0d19660acaa17349060cc1ba62f43421 100644 --- a/Framework/DataHandling/inc/MantidDataHandling/ProcessBankData.h +++ b/Framework/DataHandling/inc/MantidDataHandling/ProcessBankData.h @@ -1,30 +1,24 @@ #ifndef MANTID_DATAHANDLING_PROCESSBANKDATA_H #define MANTID_DATAHANDLING_PROCESSBANKDATA_H -// #include "MantidAPI/IFileLoader.h" #include "MantidGeometry/IDTypes.h" -// Process bank data #include "MantidKernel/Task.h" #include "MantidKernel/Timer.h" -#include "MantidDataHandling/LoadEventNexus.h" #include "MantidDataHandling/BankPulseTimes.h" #include <boost/shared_array.hpp> namespace Mantid { namespace DataHandling { +class DefaultEventLoader; -//============================================================================================== -// Class ProcessBankData -//============================================================================================== /** This task does the disk IO from loading the NXS file, * and so will be on a disk IO mutex */ class ProcessBankData : public Mantid::Kernel::Task { public: - //---------------------------------------------------------------------------------------------- /** Constructor * - * @param alg :: LoadEventNexus + * @param loader :: DefaultEventLoader * @param entry_name :: name of the bank * @param prog :: Progress reporter * @param 
event_id :: array with event IDs @@ -40,7 +34,7 @@ public: * @param max_event_id :: maximum detector ID to load * @return */ // API::IFileLoader<Kernel::NexusDescriptor> - ProcessBankData(LoadEventNexus *alg, std::string entry_name, + ProcessBankData(DefaultEventLoader &loader, std::string entry_name, API::Progress *prog, boost::shared_array<uint32_t> event_id, boost::shared_array<float> event_time_of_flight, size_t numEvents, size_t startAt, @@ -55,7 +49,7 @@ private: size_t getWorkspaceIndexFromPixelID(const detid_t pixID); /// Algorithm being run - LoadEventNexus *alg; + DefaultEventLoader &m_loader; /// NXS path to bank std::string entry_name; /// Vector where (index = pixel ID+pixelID_to_wi_offset), value = workspace diff --git a/Framework/DataHandling/src/CompressEvents.cpp b/Framework/DataHandling/src/CompressEvents.cpp index 3a8ae8b8257fc9e4b5884db7355094440ca706fb..02f1a12dfe1c726dc5b860ce4087c896b55dd48a 100644 --- a/Framework/DataHandling/src/CompressEvents.cpp +++ b/Framework/DataHandling/src/CompressEvents.cpp @@ -1,6 +1,7 @@ #include "MantidDataHandling/CompressEvents.h" #include "MantidAPI/WorkspaceFactory.h" #include "MantidDataObjects/EventWorkspace.h" +#include "MantidDataObjects/WorkspaceCreation.h" #include "MantidKernel/ArrayProperty.h" #include "MantidKernel/BoundedValidator.h" @@ -56,13 +57,7 @@ void CompressEvents::exec() { // Are we making a copy of the input workspace? if (!inplace) { - // Make a brand new EventWorkspace - outputWS = boost::dynamic_pointer_cast<EventWorkspace>( - API::WorkspaceFactory::Instance().create( - "EventWorkspace", inputWS->getNumberHistograms(), 2, 1)); - // Copy geometry over. 
- API::WorkspaceFactory::Instance().initializeFromParent(*inputWS, *outputWS, - false); + outputWS = create<EventWorkspace>(*inputWS, HistogramData::BinEdges(2)); // We DONT copy the data though // Loop over the histograms (detector spectra) tbb::parallel_for(tbb::blocked_range<size_t>(0, noSpectra), diff --git a/Framework/DataHandling/src/DefaultEventLoader.cpp b/Framework/DataHandling/src/DefaultEventLoader.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ba8adcca290c5c7b425d05ce6579f99d0829b128 --- /dev/null +++ b/Framework/DataHandling/src/DefaultEventLoader.cpp @@ -0,0 +1,152 @@ +#include "MantidDataHandling/DefaultEventLoader.h" +#include "MantidDataHandling/LoadBankFromDiskTask.h" +#include "MantidDataHandling/LoadEventNexus.h" +#include "MantidAPI/Progress.h" +#include "MantidKernel/ThreadPool.h" +#include "MantidKernel/ThreadSchedulerMutexes.h" +#include "MantidKernel/make_unique.h" + +using namespace Mantid::Kernel; + +namespace Mantid { +namespace DataHandling { + +void DefaultEventLoader::load(LoadEventNexus *alg, EventWorkspaceCollection &ws, + bool haveWeights, bool event_id_is_spec, + std::vector<std::string> bankNames, + const std::vector<int> &periodLog, + const std::string &classType, + std::vector<std::size_t> bankNumEvents, + const bool oldNeXusFileNames, const bool precount, + const int chunk, const int totalChunks) { + DefaultEventLoader loader(alg, ws, haveWeights, event_id_is_spec, + bankNames.size(), precount, chunk, totalChunks); + + auto bankRange = loader.setupChunking(bankNames, bankNumEvents); + + // Make the thread pool + auto scheduler = new ThreadSchedulerMutexes; + ThreadPool pool(scheduler); + auto diskIOMutex = boost::make_shared<std::mutex>(); + + // set up progress bar for the rest of the (multi-threaded) process + size_t numProg = bankNames.size() * (1 + 3); // 1 = disktask, 3 = proc task + if (loader.splitProcessing) + numProg += bankNames.size() * 3; // 3 = second proc task + auto prog = 
Kernel::make_unique<API::Progress>(loader.alg, 0.3, 1.0, numProg); + + for (size_t i = bankRange.first; i < bankRange.second; i++) { + if (bankNumEvents[i] > 0) + pool.schedule(new LoadBankFromDiskTask( + loader, bankNames[i], classType, bankNumEvents[i], oldNeXusFileNames, + prog.get(), diskIOMutex, *scheduler, periodLog)); + } + // Start and end all threads + pool.joinAll(); + diskIOMutex.reset(); +} + +DefaultEventLoader::DefaultEventLoader(LoadEventNexus *alg, + EventWorkspaceCollection &ws, + bool haveWeights, bool event_id_is_spec, + const size_t numBanks, + const bool precount, const int chunk, + const int totalChunks) + : m_haveWeights(haveWeights), event_id_is_spec(event_id_is_spec), + precount(precount), chunk(chunk), totalChunks(totalChunks), alg(alg), + m_ws(ws) { + // This map will be used to find the workspace index + if (event_id_is_spec) + pixelID_to_wi_vector = + m_ws.getSpectrumToWorkspaceIndexVector(pixelID_to_wi_offset); + else + pixelID_to_wi_vector = + m_ws.getDetectorIDToWorkspaceIndexVector(pixelID_to_wi_offset, true); + + // Cache a map for speed. 
+ if (!haveWeights) { + makeMapToEventLists(eventVectors); + } else { + // Convert to weighted events + for (size_t i = 0; i < m_ws.getNumberHistograms(); i++) { + m_ws.getSpectrum(i).switchTo(API::WEIGHTED); + } + makeMapToEventLists(weightedEventVectors); + } + + // split banks up if the number of cores is more than twice the number of + // banks + splitProcessing = bool(numBanks * 2 < ThreadPool::getNumPhysicalCores()); +} + +std::pair<size_t, size_t> +DefaultEventLoader::setupChunking(std::vector<std::string> &bankNames, + std::vector<std::size_t> &bankNumEvents) { + size_t bank0 = 0; + size_t bankn = bankNames.size(); + if (chunk != + EMPTY_INT()) // We are loading part - work out the bank number range + { + const size_t total_events = std::accumulate( + bankNumEvents.cbegin(), bankNumEvents.cend(), static_cast<size_t>(0)); + eventsPerChunk = total_events / totalChunks; + // Sort banks by size + size_t tmp; + std::string stmp; + for (size_t i = 0; i < bankn; i++) + for (size_t j = 0; j < bankn - 1; j++) + if (bankNumEvents[j] < bankNumEvents[j + 1]) { + tmp = bankNumEvents[j]; + bankNumEvents[j] = bankNumEvents[j + 1]; + bankNumEvents[j + 1] = tmp; + stmp = bankNames[j]; + bankNames[j] = bankNames[j + 1]; + bankNames[j + 1] = stmp; + } + int bigBanks = 0; + for (size_t i = 0; i < bankn; i++) + if (bankNumEvents[i] > eventsPerChunk) + bigBanks++; + // Each chunk is part of bank or multiple whole banks + // 0.5 for last chunk of a bank with multiple chunks + // 0.1 for multiple whole banks not completely filled + eventsPerChunk += + static_cast<size_t>((static_cast<double>(bigBanks) / + static_cast<double>(totalChunks) * 0.5 + + 0.05) * + static_cast<double>(eventsPerChunk)); + double partialChunk = 0.; + firstChunkForBank = 1; + for (int chunki = 1; chunki <= chunk; chunki++) { + if (partialChunk > 1.) 
{ + partialChunk = 0.; + firstChunkForBank = chunki; + bank0 = bankn; + } + if (bankNumEvents[bank0] > 1) { + partialChunk += static_cast<double>(eventsPerChunk) / + static_cast<double>(bankNumEvents[bank0]); + } + if (chunki < totalChunks) + bankn = bank0 + 1; + else + bankn = bankNames.size(); + if (chunki == firstChunkForBank && partialChunk > 1.0) + bankn += static_cast<size_t>(partialChunk) - 1; + if (bankn > bankNames.size()) + bankn = bankNames.size(); + } + for (size_t i = bank0; i < bankn; i++) { + size_t start_event = (chunk - firstChunkForBank) * eventsPerChunk; + size_t stop_event = bankNumEvents[i]; + // Don't change stop_event for the final chunk + if (start_event + eventsPerChunk < stop_event) + stop_event = start_event + eventsPerChunk; + bankNumEvents[i] = stop_event - start_event; + } + } + return {bank0, bankn}; +} + +} // namespace DataHandling +} // namespace Mantid diff --git a/Framework/DataHandling/src/EventWorkspaceCollection.cpp b/Framework/DataHandling/src/EventWorkspaceCollection.cpp index 3a6f9782e3273eddb2cbc066ea9a012264f485a5..9123ed563cf3be73b0e0f91cd6b87b3ead9f6982 100644 --- a/Framework/DataHandling/src/EventWorkspaceCollection.cpp +++ b/Framework/DataHandling/src/EventWorkspaceCollection.cpp @@ -1,11 +1,13 @@ #include "MantidDataHandling/EventWorkspaceCollection.h" #include "MantidDataObjects/EventWorkspace.h" +#include "MantidDataObjects/WorkspaceCreation.h" #include "MantidKernel/UnitFactory.h" #include "MantidGeometry/Instrument.h" #include "MantidAPI/Axis.h" #include "MantidAPI/Run.h" #include "MantidAPI/Sample.h" #include "MantidAPI/WorkspaceFactory.h" +#include "MantidIndexing/IndexInfo.h" #include <vector> #include <set> @@ -224,34 +226,10 @@ size_t EventWorkspaceCollection::getNumberEvents() const { return m_WsVec[0]->getNumberEvents(); // Should be the sum across all periods? 
} -void EventWorkspaceCollection::resizeTo(const size_t size) { - for (auto &ws : m_WsVec) { - auto tmp = createWorkspace<DataObjects::EventWorkspace>(size, 2, 1); - WorkspaceFactory::Instance().initializeFromParent(*ws, *tmp, true); - ws = std::move(tmp); - for (size_t i = 0; i < ws->getNumberHistograms(); ++i) - ws->getSpectrum(i).setSpectrumNo(static_cast<specnum_t>(i + 1)); - } -} - -void EventWorkspaceCollection::padSpectra(const std::vector<int32_t> &padding) { - if (padding.empty()) { - const std::vector<detid_t> pixelIDs = getInstrument()->getDetectorIDs(true); - resizeTo(pixelIDs.size()); - for (auto &ws : m_WsVec) - for (size_t i = 0; i < pixelIDs.size(); ++i) - ws->getSpectrum(i).setDetectorID(pixelIDs[i]); - } else { - resizeTo(padding.size()); - for (auto &ws : m_WsVec) { - for (size_t i = 0; i < padding.size(); ++i) { - // specList ranges from 1, ..., N - // detector ranges from 0, ..., N-1 - ws->getSpectrum(i).setDetectorID(padding[i] - 1); - ws->getSpectrum(i).setSpectrumNo(padding[i]); - } - } - } +void EventWorkspaceCollection::setIndexInfo( + const Indexing::IndexInfo &indexInfo) { + for (auto &ws : m_WsVec) + ws = create<EventWorkspace>(*ws, indexInfo, HistogramData::BinEdges(2)); } void EventWorkspaceCollection::setInstrument( @@ -274,12 +252,6 @@ void EventWorkspaceCollection::updateSpectraUsing( } } -void EventWorkspaceCollection::populateInstrumentParameters() { - for (auto &ws : m_WsVec) { - ws->populateInstrumentParameters(); - } -} - void EventWorkspaceCollection::setGeometryFlag(const int flag) { for (auto &ws : m_WsVec) { ws->mutableSample().setGeometryFlag(flag); diff --git a/Framework/DataHandling/src/LoadBankFromDiskTask.cpp b/Framework/DataHandling/src/LoadBankFromDiskTask.cpp new file mode 100644 index 0000000000000000000000000000000000000000..25e44bf91ea335cdb533652839d6016fb886eecc --- /dev/null +++ b/Framework/DataHandling/src/LoadBankFromDiskTask.cpp @@ -0,0 +1,511 @@ +#include "MantidDataHandling/BankPulseTimes.h" +#include 
"MantidDataHandling/DefaultEventLoader.h" +#include "MantidDataHandling/LoadBankFromDiskTask.h" +#include "MantidDataHandling/LoadEventNexus.h" +#include "MantidDataHandling/ProcessBankData.h" + +namespace Mantid { +namespace DataHandling { + +/** Constructor +* +* @param loader :: Handle to the main loader +* @param entry_name :: The pathname of the bank to load +* @param entry_type :: The classtype of the entry to load +* @param numEvents :: The number of events in the bank. +* @param oldNeXusFileNames :: Identify if file is of old variety. +* @param prog :: an optional Progress object +* @param ioMutex :: a mutex shared for all Disk I-O tasks +* @param scheduler :: the ThreadScheduler that runs this task. +* @param framePeriodNumbers :: Period numbers corresponding to each frame +*/ +LoadBankFromDiskTask::LoadBankFromDiskTask( + DefaultEventLoader &loader, const std::string &entry_name, + const std::string &entry_type, const std::size_t numEvents, + const bool oldNeXusFileNames, API::Progress *prog, + boost::shared_ptr<std::mutex> ioMutex, Kernel::ThreadScheduler &scheduler, + const std::vector<int> &framePeriodNumbers) + : m_loader(loader), entry_name(entry_name), entry_type(entry_type), + prog(prog), scheduler(scheduler), m_loadError(false), + m_oldNexusFileNames(oldNeXusFileNames), m_event_id(nullptr), + m_event_time_of_flight(nullptr), m_have_weight(false), + m_event_weight(nullptr), m_framePeriodNumbers(framePeriodNumbers) { + setMutex(ioMutex); + m_cost = static_cast<double>(numEvents); + m_min_id = std::numeric_limits<uint32_t>::max(); + m_max_id = 0; +} + +/** Load the pulse times, if needed. This sets +* thisBankPulseTimes to the right pointer. +* */ +void LoadBankFromDiskTask::loadPulseTimes(::NeXus::File &file) { + try { + // First, get info about the event_time_zero field in this bank + file.openData("event_time_zero"); + } catch (::NeXus::Exception &) { + // Field not found error is most likely. + // Use the "proton_charge" das logs. 
+ thisBankPulseTimes = m_loader.alg->m_allBanksPulseTimes; + return; + } + std::string thisStartTime; + size_t thisNumPulses = 0; + file.getAttr("offset", thisStartTime); + if (!file.getInfo().dims.empty()) + thisNumPulses = file.getInfo().dims[0]; + file.closeData(); + + // Now, we look through existing ones to see if it is already loaded + // thisBankPulseTimes = NULL; + for (auto &bankPulseTime : m_loader.m_bankPulseTimes) { + if (bankPulseTime->equals(thisNumPulses, thisStartTime)) { + thisBankPulseTimes = bankPulseTime; + return; + } + } + + // Not found? Need to load and add it + thisBankPulseTimes = boost::make_shared<BankPulseTimes>(boost::ref(file), + m_framePeriodNumbers); + m_loader.m_bankPulseTimes.push_back(thisBankPulseTimes); +} + +/** Load the event_index field +(a list of size of # of pulses giving the index in the event list for that +pulse) + +* @param file :: File handle for the NeXus file +* @param event_index :: ref to the vector +*/ +void LoadBankFromDiskTask::loadEventIndex(::NeXus::File &file, + std::vector<uint64_t> &event_index) { + // Get the event_index (a list of size of # of pulses giving the index in + // the event list for that pulse) + file.openData("event_index"); + // Must be uint64 + if (file.getInfo().type == ::NeXus::UINT64) + file.getData(event_index); + else { + m_loader.alg->getLogger().warning() + << "Entry " << entry_name + << "'s event_index field is not UINT64! It will be skipped.\n"; + m_loadError = true; + } + file.closeData(); + + // Look for the sign that the bank is empty + if (event_index.size() == 1) { + if (event_index[0] == 0) { + // One entry, only zero. This means NO events in this bank. 
+ m_loadError = true; + m_loader.alg->getLogger().debug() << "Bank " << entry_name + << " is empty.\n"; + } + } +} + +/** Open the event_id field and validate the contents +* +* @param file :: File handle for the NeXus file +* @param start_event :: set to the index of the first event +* @param stop_event :: set to the index of the last event + 1 +* @param event_index :: (a list of size of # of pulses giving the index in +*the event list for that pulse) +*/ +void LoadBankFromDiskTask::prepareEventId(::NeXus::File &file, + size_t &start_event, + size_t &stop_event, + std::vector<uint64_t> &event_index) { + // Get the list of pixel ID's + if (m_oldNexusFileNames) + file.openData("event_pixel_id"); + else + file.openData("event_id"); + + // By default, use all available indices + start_event = 0; + ::NeXus::Info id_info = file.getInfo(); + // dims[0] can be negative in ISIS meaning 2^32 + dims[0]. Take that into + // account + int64_t dim0 = recalculateDataSize(id_info.dims[0]); + stop_event = static_cast<size_t>(dim0); + + // Handle the time filtering by changing the start/end offsets. + for (size_t i = 0; i < thisBankPulseTimes->numPulses; i++) { + if (thisBankPulseTimes->pulseTimes[i] >= m_loader.alg->filter_time_start) { + start_event = event_index[i]; + break; // stop looking + } + } + + if (start_event > static_cast<size_t>(dim0)) { + // If the frame indexes are bad then we can't construct the times of the + // events properly and filtering by time + // will not work on this data + m_loader.alg->getLogger().warning() + << this->entry_name + << "'s field 'event_index' seems to be invalid (start_index > than " + "the number of events in the bank)." 
+ << "All events will appear in the same frame and filtering by time " + "will not be possible on this data.\n"; + start_event = 0; + stop_event = static_cast<size_t>(dim0); + } else { + for (size_t i = 0; i < thisBankPulseTimes->numPulses; i++) { + if (thisBankPulseTimes->pulseTimes[i] > m_loader.alg->filter_time_stop) { + stop_event = event_index[i]; + break; + } + } + } + // We are loading part - work out the event number range + if (m_loader.chunk != EMPTY_INT()) { + start_event = + (m_loader.chunk - m_loader.firstChunkForBank) * m_loader.eventsPerChunk; + // Don't change stop_event for the final chunk + if (start_event + m_loader.eventsPerChunk < stop_event) + stop_event = start_event + m_loader.eventsPerChunk; + } + + // Make sure it is within range + if (stop_event > static_cast<size_t>(dim0)) + stop_event = dim0; + + m_loader.alg->getLogger().debug() << entry_name << ": start_event " + << start_event << " stop_event " + << stop_event << "\n"; +} + +/** Load the event_id field, which has been open +*/ +void LoadBankFromDiskTask::loadEventId(::NeXus::File &file) { + // This is the data size + ::NeXus::Info id_info = file.getInfo(); + int64_t dim0 = recalculateDataSize(id_info.dims[0]); + + // Now we allocate the required arrays + m_event_id = new uint32_t[m_loadSize[0]]; + + // Check that the required space is there in the file. + if (dim0 < m_loadSize[0] + m_loadStart[0]) { + m_loader.alg->getLogger().warning() + << "Entry " << entry_name << "'s event_id field is too small (" << dim0 + << ") to load the desired data size (" << m_loadSize[0] + m_loadStart[0] + << ").\n"; + m_loadError = true; + } + + if (m_loader.alg->getCancel()) + m_loadError = true; // To allow cancelling the algorithm + + if (!m_loadError) { + // Must be uint32 + if (id_info.type == ::NeXus::UINT32) + file.getSlab(m_event_id, m_loadStart, m_loadSize); + else { + m_loader.alg->getLogger().warning() + << "Entry " << entry_name + << "'s event_id field is not UINT32! 
It will be skipped.\n"; + m_loadError = true; + } + file.closeData(); + + // determine the range of pixel ids + for (auto i = 0; i < m_loadSize[0]; ++i) { + uint32_t temp = m_event_id[i]; + if (temp < m_min_id) + m_min_id = temp; + if (temp > m_max_id) + m_max_id = temp; + } + + if (m_min_id > static_cast<uint32_t>(m_loader.eventid_max)) { + // All the detector IDs in the bank are higher than the highest 'known' + // (from the IDF) + // ID. Setting this will abort the loading of the bank. + m_loadError = true; + } + // fixup the minimum pixel id in the case that it's lower than the lowest + // 'known' id. We test this by checking that when we add the offset we + // would not get a negative index into the vector. Note that m_min_id is + // a uint so we have to be cautious about adding it to an int which may be + // negative. + if (static_cast<int32_t>(m_min_id) + m_loader.pixelID_to_wi_offset < 0) { + m_min_id = static_cast<uint32_t>(abs(m_loader.pixelID_to_wi_offset)); + } + // fixup the maximum pixel id in the case that it's higher than the + // highest 'known' id + if (m_max_id > static_cast<uint32_t>(m_loader.eventid_max)) + m_max_id = static_cast<uint32_t>(m_loader.eventid_max); + } +} + +/** Open and load the times-of-flight data +*/ +void LoadBankFromDiskTask::loadTof(::NeXus::File &file) { + // Allocate the array + auto temp = new float[m_loadSize[0]]; + delete[] m_event_time_of_flight; + m_event_time_of_flight = temp; + + // Get the list of event_time_of_flight's + if (!m_oldNexusFileNames) + file.openData("event_time_offset"); + else + file.openData("event_time_of_flight"); + + // Check that the required space is there in the file. 
+ ::NeXus::Info tof_info = file.getInfo(); + int64_t tof_dim0 = recalculateDataSize(tof_info.dims[0]); + if (tof_dim0 < m_loadSize[0] + m_loadStart[0]) { + m_loader.alg->getLogger().warning() + << "Entry " << entry_name << "'s event_time_offset field is too small " + "to load the desired data.\n"; + m_loadError = true; + } + + // Check that the type is what it is supposed to be + if (tof_info.type == ::NeXus::FLOAT32) + file.getSlab(m_event_time_of_flight, m_loadStart, m_loadSize); + else { + m_loader.alg->getLogger().warning() + << "Entry " << entry_name + << "'s event_time_offset field is not FLOAT32! It will be skipped.\n"; + m_loadError = true; + } + + if (!m_loadError) { + std::string units; + file.getAttr("units", units); + if (units != "microsecond") { + m_loader.alg->getLogger().warning() + << "Entry " << entry_name << "'s event_time_offset field's units are " + "not microsecond. It will be skipped.\n"; + m_loadError = true; + } + file.closeData(); + } // no error +} + +/** Load weight of weighted events +*/ +void LoadBankFromDiskTask::loadEventWeights(::NeXus::File &file) { + try { + // First, get info about the event_weight field in this bank + file.openData("event_weight"); + } catch (::NeXus::Exception &) { + // Field not found error is most likely. 
+ m_have_weight = false; + return; + } + // OK, we've got them + m_have_weight = true; + + // Allocate the array + auto temp = new float[m_loadSize[0]]; + delete[] m_event_weight; + m_event_weight = temp; + + ::NeXus::Info weight_info = file.getInfo(); + int64_t weight_dim0 = recalculateDataSize(weight_info.dims[0]); + if (weight_dim0 < m_loadSize[0] + m_loadStart[0]) { + m_loader.alg->getLogger().warning() + << "Entry " << entry_name + << "'s event_weight field is too small to load the desired data.\n"; + m_loadError = true; + } + + // Check that the type is what it is supposed to be + if (weight_info.type == ::NeXus::FLOAT32) + file.getSlab(m_event_weight, m_loadStart, m_loadSize); + else { + m_loader.alg->getLogger().warning() + << "Entry " << entry_name + << "'s event_weight field is not FLOAT32! It will be skipped.\n"; + m_loadError = true; + } + + if (!m_loadError) { + file.closeData(); + } +} + +void LoadBankFromDiskTask::run() { + // The vectors we will be filling + auto event_index_ptr = new std::vector<uint64_t>(); + std::vector<uint64_t> &event_index = *event_index_ptr; + + // These give the limits in each file as to which events we actually load + // (when filtering by time). + m_loadStart.resize(1, 0); + m_loadSize.resize(1, 0); + + // Data arrays + m_event_id = nullptr; + m_event_time_of_flight = nullptr; + m_event_weight = nullptr; + + m_loadError = false; + m_have_weight = m_loader.m_haveWeights; + + prog->report(entry_name + ": load from disk"); + + // Open the file + ::NeXus::File file(m_loader.alg->m_filename); + try { + // Navigate into the file + file.openGroup(m_loader.alg->m_top_entry_name, "NXentry"); + // Open the bankN_event group + file.openGroup(entry_name, entry_type); + + // Load the event_index field. + this->loadEventIndex(file, event_index); + + if (!m_loadError) { + // Load and validate the pulse times + this->loadPulseTimes(file); + + // The event_index should be the same length as the pulse times from DAS + // logs. 
+ if (event_index.size() != thisBankPulseTimes->numPulses) + m_loader.alg->getLogger().warning() + << "Bank " << entry_name + << " has a mismatch between the number of event_index entries " + "and the number of pulse times in event_time_zero.\n"; + + // Open and validate event_id field. + size_t start_event = 0; + size_t stop_event = 0; + this->prepareEventId(file, start_event, stop_event, event_index); + + // These are the arguments to getSlab() + m_loadStart[0] = static_cast<int>(start_event); + m_loadSize[0] = static_cast<int>(stop_event - start_event); + + if ((m_loadSize[0] > 0) && (m_loadStart[0] >= 0)) { + // Load pixel IDs + this->loadEventId(file); + if (m_loader.alg->getCancel()) + m_loadError = true; // To allow cancelling the algorithm + + // And TOF. + if (!m_loadError) { + this->loadTof(file); + if (m_have_weight) { + this->loadEventWeights(file); + } + } + } // Size is at least 1 + else { + // Found a size that was 0 or less; stop processing + m_loadError = true; + } + + } // no error + + } // try block + catch (std::exception &e) { + m_loader.alg->getLogger().error() << "Error while loading bank " + << entry_name << ":\n"; + m_loader.alg->getLogger().error() << e.what() << '\n'; + m_loadError = true; + } catch (...) { + m_loader.alg->getLogger().error() << "Unspecified error while loading bank " + << entry_name << '\n'; + m_loadError = true; + } + + // Close up the file even if errors occurred. 
+ file.closeGroup(); + file.close(); + + // Abort if anything failed + if (m_loadError) { + delete[] m_event_id; + delete[] m_event_time_of_flight; + if (m_have_weight) { + delete[] m_event_weight; + } + delete event_index_ptr; + + return; + } + + const auto bank_size = m_max_id - m_min_id; + const uint32_t minSpectraToLoad = + static_cast<uint32_t>(m_loader.alg->m_specMin); + const uint32_t maxSpectraToLoad = + static_cast<uint32_t>(m_loader.alg->m_specMax); + const uint32_t emptyInt = static_cast<uint32_t>(EMPTY_INT()); + // check that if a range of spectra were requested that these fit within + // this bank + if (minSpectraToLoad != emptyInt && m_min_id < minSpectraToLoad) { + if (minSpectraToLoad > m_max_id) { // the minimum spectra to load is more + // than the max of this bank + return; + } + // the min spectra to load is higher than the min for this bank + m_min_id = minSpectraToLoad; + } + if (maxSpectraToLoad != emptyInt && m_max_id > maxSpectraToLoad) { + if (maxSpectraToLoad < m_min_id) { + // the maximum spectra to load is less than the minimum of this bank + return; + } + // the max spectra to load is lower than the max for this bank + m_max_id = maxSpectraToLoad; + } + if (m_min_id > m_max_id) { + // the min is now larger than the max, this means the entire block of + // spectra to load is outside this bank + return; + } + + // schedule the job to generate the event lists + auto mid_id = m_max_id; + if (m_loader.splitProcessing && m_max_id > (m_min_id + (bank_size / 4))) + // only split if told to and the section to load is at least 1/4 the size + // of the whole bank + mid_id = (m_max_id + m_min_id) / 2; + + // No error? Launch a new task to process that data. 
+ size_t numEvents = m_loadSize[0]; + size_t startAt = m_loadStart[0]; + + // convert things to shared_arrays + boost::shared_array<uint32_t> event_id_shrd(m_event_id); + boost::shared_array<float> event_time_of_flight_shrd(m_event_time_of_flight); + boost::shared_array<float> event_weight_shrd(m_event_weight); + boost::shared_ptr<std::vector<uint64_t>> event_index_shrd(event_index_ptr); + + ProcessBankData *newTask1 = new ProcessBankData( + m_loader, entry_name, prog, event_id_shrd, event_time_of_flight_shrd, + numEvents, startAt, event_index_shrd, thisBankPulseTimes, m_have_weight, + event_weight_shrd, m_min_id, mid_id); + scheduler.push(newTask1); + if (m_loader.splitProcessing && (mid_id < m_max_id)) { + ProcessBankData *newTask2 = new ProcessBankData( + m_loader, entry_name, prog, event_id_shrd, event_time_of_flight_shrd, + numEvents, startAt, event_index_shrd, thisBankPulseTimes, m_have_weight, + event_weight_shrd, (mid_id + 1), m_max_id); + scheduler.push(newTask2); + } +} + +/** +* Interpret the value describing the number of events. If the number is +* positive return it unchanged. +* If the value is negative (can happen at ISIS) add 2^32 to it. +* @param size :: The size of events value. 
+*/ +int64_t LoadBankFromDiskTask::recalculateDataSize(const int64_t &size) { + if (size < 0) { + const int64_t shift = int64_t(1) << 32; + return shift + size; + } + return size; +} + +} // namespace DataHandling +} // namespace Mantid diff --git a/Framework/DataHandling/src/LoadEventNexus.cpp b/Framework/DataHandling/src/LoadEventNexus.cpp index 3bca9459782a663cf12be3db1b4a2e383a509152..98a0e916c74c516abbcc3c7c9140596ee4f0b4ab 100644 --- a/Framework/DataHandling/src/LoadEventNexus.cpp +++ b/Framework/DataHandling/src/LoadEventNexus.cpp @@ -1,12 +1,13 @@ #include "MantidDataHandling/LoadEventNexus.h" +#include "MantidDataHandling/LoadEventNexusIndexSetup.h" #include "MantidDataHandling/EventWorkspaceCollection.h" -#include "MantidDataHandling/ProcessBankData.h" +#include "MantidDataHandling/DefaultEventLoader.h" +#include "MantidDataHandling/ParallelEventLoader.h" #include "MantidAPI/Axis.h" #include "MantidAPI/FileProperty.h" #include "MantidAPI/RegisterFileLoader.h" #include "MantidAPI/Run.h" #include "MantidAPI/Sample.h" -#include "MantidAPI/SpectrumDetectorMapping.h" #include "MantidGeometry/Instrument.h" #include "MantidGeometry/Instrument/Goniometer.h" #include "MantidGeometry/Instrument/RectangularDetector.h" @@ -14,12 +15,11 @@ #include "MantidKernel/BoundedValidator.h" #include "MantidKernel/DateAndTimeHelpers.h" #include "MantidKernel/MultiThreaded.h" -#include "MantidKernel/ThreadPool.h" -#include "MantidKernel/ThreadSchedulerMutexes.h" #include "MantidKernel/TimeSeriesProperty.h" #include "MantidKernel/Timer.h" #include "MantidKernel/UnitFactory.h" #include "MantidKernel/VisibleWhenProperty.h" +#include "MantidIndexing/IndexInfo.h" #include <boost/function.hpp> #include <boost/random/mersenne_twister.hpp> @@ -72,577 +72,15 @@ void copyLogs(const Mantid::DataHandling::EventWorkspaceCollection_sptr &from, } } -//============================================================================================== -// Class LoadBankFromDiskTask 
-//============================================================================================== -/** This task does the disk IO from loading the NXS file, -* and so will be on a disk IO mutex */ -class LoadBankFromDiskTask : public Task { - -public: - //--------------------------------------------------------------------------------------------------- - /** Constructor - * - * @param alg :: Handle to the main algorithm - * @param entry_name :: The pathname of the bank to load - * @param entry_type :: The classtype of the entry to load - * @param numEvents :: The number of events in the bank. - * @param oldNeXusFileNames :: Identify if file is of old variety. - * @param prog :: an optional Progress object - * @param ioMutex :: a mutex shared for all Disk I-O tasks - * @param scheduler :: the ThreadScheduler that runs this task. - * @param framePeriodNumbers :: Period numbers corresponding to each frame - */ - LoadBankFromDiskTask(LoadEventNexus *alg, const std::string &entry_name, - const std::string &entry_type, - const std::size_t numEvents, - const bool oldNeXusFileNames, Progress *prog, - boost::shared_ptr<std::mutex> ioMutex, - ThreadScheduler *scheduler, - const std::vector<int> &framePeriodNumbers) - : Task(), alg(alg), entry_name(entry_name), entry_type(entry_type), - // prog(prog), scheduler(scheduler), thisBankPulseTimes(NULL), - // m_loadError(false), - prog(prog), scheduler(scheduler), m_loadError(false), - m_oldNexusFileNames(oldNeXusFileNames), m_loadStart(), m_loadSize(), - m_event_id(nullptr), m_event_time_of_flight(nullptr), - m_have_weight(false), m_event_weight(nullptr), - m_framePeriodNumbers(framePeriodNumbers) { - setMutex(ioMutex); - m_cost = static_cast<double>(numEvents); - m_min_id = std::numeric_limits<uint32_t>::max(); - m_max_id = 0; - } - - //--------------------------------------------------------------------------------------------------- - /** Load the pulse times, if needed. This sets - * thisBankPulseTimes to the right pointer. 
- * */ - void loadPulseTimes(::NeXus::File &file) { - try { - // First, get info about the event_time_zero field in this bank - file.openData("event_time_zero"); - } catch (::NeXus::Exception &) { - // Field not found error is most likely. - // Use the "proton_charge" das logs. - thisBankPulseTimes = alg->m_allBanksPulseTimes; - return; - } - std::string thisStartTime; - size_t thisNumPulses = 0; - file.getAttr("offset", thisStartTime); - if (!file.getInfo().dims.empty()) - thisNumPulses = file.getInfo().dims[0]; - file.closeData(); - - // Now, we look through existing ones to see if it is already loaded - // thisBankPulseTimes = NULL; - for (auto &bankPulseTime : alg->m_bankPulseTimes) { - if (bankPulseTime->equals(thisNumPulses, thisStartTime)) { - thisBankPulseTimes = bankPulseTime; - return; - } - } - - // Not found? Need to load and add it - thisBankPulseTimes = boost::make_shared<BankPulseTimes>( - boost::ref(file), m_framePeriodNumbers); - alg->m_bankPulseTimes.push_back(thisBankPulseTimes); - } - - //--------------------------------------------------------------------------------------------------- - /** Load the event_index field - (a list of size of # of pulses giving the index in the event list for that - pulse) - - * @param file :: File handle for the NeXus file - * @param event_index :: ref to the vector - */ - void loadEventIndex(::NeXus::File &file, std::vector<uint64_t> &event_index) { - // Get the event_index (a list of size of # of pulses giving the index in - // the event list for that pulse) - file.openData("event_index"); - // Must be uint64 - if (file.getInfo().type == ::NeXus::UINT64) - file.getData(event_index); - else { - alg->getLogger().warning() - << "Entry " << entry_name - << "'s event_index field is not UINT64! It will be skipped.\n"; - m_loadError = true; - } - file.closeData(); - - // Look for the sign that the bank is empty - if (event_index.size() == 1) { - if (event_index[0] == 0) { - // One entry, only zero. 
This means NO events in this bank. - m_loadError = true; - alg->getLogger().debug() << "Bank " << entry_name << " is empty.\n"; - } - } - } - - //--------------------------------------------------------------------------------------------------- - /** Open the event_id field and validate the contents - * - * @param file :: File handle for the NeXus file - * @param start_event :: set to the index of the first event - * @param stop_event :: set to the index of the last event + 1 - * @param event_index :: (a list of size of # of pulses giving the index in - *the event list for that pulse) - */ - void prepareEventId(::NeXus::File &file, size_t &start_event, - size_t &stop_event, std::vector<uint64_t> &event_index) { - // Get the list of pixel ID's - if (m_oldNexusFileNames) - file.openData("event_pixel_id"); - else - file.openData("event_id"); - - // By default, use all available indices - start_event = 0; - ::NeXus::Info id_info = file.getInfo(); - // dims[0] can be negative in ISIS meaning 2^32 + dims[0]. Take that into - // account - int64_t dim0 = recalculateDataSize(id_info.dims[0]); - stop_event = static_cast<size_t>(dim0); - - // Handle the time filtering by changing the start/end offsets. - for (size_t i = 0; i < thisBankPulseTimes->numPulses; i++) { - if (thisBankPulseTimes->pulseTimes[i] >= alg->filter_time_start) { - start_event = event_index[i]; - break; // stop looking - } - } - - if (start_event > static_cast<size_t>(dim0)) { - // If the frame indexes are bad then we can't construct the times of the - // events properly and filtering by time - // will not work on this data - alg->getLogger().warning() - << this->entry_name - << "'s field 'event_index' seems to be invalid (start_index > than " - "the number of events in the bank)." 
- << "All events will appear in the same frame and filtering by time " - "will not be possible on this data.\n"; - start_event = 0; - stop_event = static_cast<size_t>(dim0); - } else { - for (size_t i = 0; i < thisBankPulseTimes->numPulses; i++) { - if (thisBankPulseTimes->pulseTimes[i] > alg->filter_time_stop) { - stop_event = event_index[i]; - break; - } - } - } - // We are loading part - work out the event number range - if (alg->chunk != EMPTY_INT()) { - start_event = (alg->chunk - alg->firstChunkForBank) * alg->eventsPerChunk; - // Don't change stop_event for the final chunk - if (start_event + alg->eventsPerChunk < stop_event) - stop_event = start_event + alg->eventsPerChunk; - } - - // Make sure it is within range - if (stop_event > static_cast<size_t>(dim0)) - stop_event = dim0; - - alg->getLogger().debug() << entry_name << ": start_event " << start_event - << " stop_event " << stop_event << "\n"; - } - - //--------------------------------------------------------------------------------------------------- - /** Load the event_id field, which has been open - */ - void loadEventId(::NeXus::File &file) { - // This is the data size - ::NeXus::Info id_info = file.getInfo(); - int64_t dim0 = recalculateDataSize(id_info.dims[0]); - - // Now we allocate the required arrays - m_event_id = new uint32_t[m_loadSize[0]]; - - // Check that the required space is there in the file. - if (dim0 < m_loadSize[0] + m_loadStart[0]) { - alg->getLogger().warning() << "Entry " << entry_name - << "'s event_id field is too small (" << dim0 - << ") to load the desired data size (" - << m_loadSize[0] + m_loadStart[0] << ").\n"; - m_loadError = true; - } - - if (alg->getCancel()) - m_loadError = true; // To allow cancelling the algorithm - - if (!m_loadError) { - // Must be uint32 - if (id_info.type == ::NeXus::UINT32) - file.getSlab(m_event_id, m_loadStart, m_loadSize); - else { - alg->getLogger().warning() - << "Entry " << entry_name - << "'s event_id field is not UINT32! 
It will be skipped.\n"; - m_loadError = true; - } - file.closeData(); - - // determine the range of pixel ids - for (auto i = 0; i < m_loadSize[0]; ++i) { - uint32_t temp = m_event_id[i]; - if (temp < m_min_id) - m_min_id = temp; - if (temp > m_max_id) - m_max_id = temp; - } - - if (m_min_id > static_cast<uint32_t>(alg->eventid_max)) { - // All the detector IDs in the bank are higher than the highest 'known' - // (from the IDF) - // ID. Setting this will abort the loading of the bank. - m_loadError = true; - } - // fixup the minimum pixel id in the case that it's lower than the lowest - // 'known' id. We test this by checking that when we add the offset we - // would not get a negative index into the vector. Note that m_min_id is - // a uint so we have to be cautious about adding it to an int which may be - // negative. - if (static_cast<int32_t>(m_min_id) + alg->pixelID_to_wi_offset < 0) { - m_min_id = static_cast<uint32_t>(abs(alg->pixelID_to_wi_offset)); - } - // fixup the maximum pixel id in the case that it's higher than the - // highest 'known' id - if (m_max_id > static_cast<uint32_t>(alg->eventid_max)) - m_max_id = static_cast<uint32_t>(alg->eventid_max); - } - } - - //--------------------------------------------------------------------------------------------------- - /** Open and load the times-of-flight data - */ - void loadTof(::NeXus::File &file) { - // Allocate the array - auto temp = new float[m_loadSize[0]]; - delete[] m_event_time_of_flight; - m_event_time_of_flight = temp; - - // Get the list of event_time_of_flight's - if (!m_oldNexusFileNames) - file.openData("event_time_offset"); - else - file.openData("event_time_of_flight"); - - // Check that the required space is there in the file. 
- ::NeXus::Info tof_info = file.getInfo(); - int64_t tof_dim0 = recalculateDataSize(tof_info.dims[0]); - if (tof_dim0 < m_loadSize[0] + m_loadStart[0]) { - alg->getLogger().warning() << "Entry " << entry_name - << "'s event_time_offset field is too small " - "to load the desired data.\n"; - m_loadError = true; - } - - // Check that the type is what it is supposed to be - if (tof_info.type == ::NeXus::FLOAT32) - file.getSlab(m_event_time_of_flight, m_loadStart, m_loadSize); - else { - alg->getLogger().warning() - << "Entry " << entry_name - << "'s event_time_offset field is not FLOAT32! It will be skipped.\n"; - m_loadError = true; - } - - if (!m_loadError) { - std::string units; - file.getAttr("units", units); - if (units != "microsecond") { - alg->getLogger().warning() << "Entry " << entry_name - << "'s event_time_offset field's units are " - "not microsecond. It will be skipped.\n"; - m_loadError = true; - } - file.closeData(); - } // no error - } - - //---------------------------------------------------------------------------------------------- - /** Load weight of weigthed events - */ - void loadEventWeights(::NeXus::File &file) { - try { - // First, get info about the event_weight field in this bank - file.openData("event_weight"); - } catch (::NeXus::Exception &) { - // Field not found error is most likely. 
- m_have_weight = false; - return; - } - // OK, we've got them - m_have_weight = true; - - // Allocate the array - auto temp = new float[m_loadSize[0]]; - delete[] m_event_weight; - m_event_weight = temp; - - ::NeXus::Info weight_info = file.getInfo(); - int64_t weight_dim0 = recalculateDataSize(weight_info.dims[0]); - if (weight_dim0 < m_loadSize[0] + m_loadStart[0]) { - alg->getLogger().warning() - << "Entry " << entry_name - << "'s event_weight field is too small to load the desired data.\n"; - m_loadError = true; - } - - // Check that the type is what it is supposed to be - if (weight_info.type == ::NeXus::FLOAT32) - file.getSlab(m_event_weight, m_loadStart, m_loadSize); - else { - alg->getLogger().warning() - << "Entry " << entry_name - << "'s event_weight field is not FLOAT32! It will be skipped.\n"; - m_loadError = true; - } - - if (!m_loadError) { - file.closeData(); - } - } - - //--------------------------------------------------------------------------------------------------- - void run() override { - // The vectors we will be filling - auto event_index_ptr = new std::vector<uint64_t>(); - std::vector<uint64_t> &event_index = *event_index_ptr; - - // These give the limits in each file as to which events we actually load - // (when filtering by time). - m_loadStart.resize(1, 0); - m_loadSize.resize(1, 0); - - // Data arrays - m_event_id = nullptr; - m_event_time_of_flight = nullptr; - m_event_weight = nullptr; - - m_loadError = false; - m_have_weight = alg->m_haveWeights; - - prog->report(entry_name + ": load from disk"); - - // Open the file - ::NeXus::File file(alg->m_filename); - try { - // Navigate into the file - file.openGroup(alg->m_top_entry_name, "NXentry"); - // Open the bankN_event group - file.openGroup(entry_name, entry_type); - - // Load the event_index field. 
- this->loadEventIndex(file, event_index); - - if (!m_loadError) { - // Load and validate the pulse times - this->loadPulseTimes(file); - - // The event_index should be the same length as the pulse times from DAS - // logs. - if (event_index.size() != thisBankPulseTimes->numPulses) - alg->getLogger().warning() - << "Bank " << entry_name - << " has a mismatch between the number of event_index entries " - "and the number of pulse times in event_time_zero.\n"; - - // Open and validate event_id field. - size_t start_event = 0; - size_t stop_event = 0; - this->prepareEventId(file, start_event, stop_event, event_index); - - // These are the arguments to getSlab() - m_loadStart[0] = static_cast<int>(start_event); - m_loadSize[0] = static_cast<int>(stop_event - start_event); - - if ((m_loadSize[0] > 0) && (m_loadStart[0] >= 0)) { - // Load pixel IDs - this->loadEventId(file); - if (alg->getCancel()) - m_loadError = true; // To allow cancelling the algorithm - - // And TOF. - if (!m_loadError) { - this->loadTof(file); - if (m_have_weight) { - this->loadEventWeights(file); - } - } - } // Size is at least 1 - else { - // Found a size that was 0 or less; stop processing - m_loadError = true; - } - - } // no error - - } // try block - catch (std::exception &e) { - alg->getLogger().error() << "Error while loading bank " << entry_name - << ":\n"; - alg->getLogger().error() << e.what() << '\n'; - m_loadError = true; - } catch (...) { - alg->getLogger().error() << "Unspecified error while loading bank " - << entry_name << '\n'; - m_loadError = true; - } - - // Close up the file even if errors occured. 
- file.closeGroup(); - file.close(); - - // Abort if anything failed - if (m_loadError) { - delete[] m_event_id; - delete[] m_event_time_of_flight; - if (m_have_weight) { - delete[] m_event_weight; - } - delete event_index_ptr; - - return; - } - - const auto bank_size = m_max_id - m_min_id; - const uint32_t minSpectraToLoad = static_cast<uint32_t>(alg->m_specMin); - const uint32_t maxSpectraToLoad = static_cast<uint32_t>(alg->m_specMax); - const uint32_t emptyInt = static_cast<uint32_t>(EMPTY_INT()); - // check that if a range of spectra were requested that these fit within - // this bank - if (minSpectraToLoad != emptyInt && m_min_id < minSpectraToLoad) { - if (minSpectraToLoad > m_max_id) { // the minimum spectra to load is more - // than the max of this bank - return; - } - // the min spectra to load is higher than the min for this bank - m_min_id = minSpectraToLoad; - } - if (maxSpectraToLoad != emptyInt && m_max_id > maxSpectraToLoad) { - if (maxSpectraToLoad < m_min_id) { - // the maximum spectra to load is less than the minimum of this bank - return; - } - // the max spectra to load is lower than the max for this bank - m_max_id = maxSpectraToLoad; - } - if (m_min_id > m_max_id) { - // the min is now larger than the max, this means the entire block of - // spectra to load is outside this bank - return; - } - - // schedule the job to generate the event lists - auto mid_id = m_max_id; - if (alg->splitProcessing && m_max_id > (m_min_id + (bank_size / 4))) - // only split if told to and the section to load is at least 1/4 the size - // of the whole bank - mid_id = (m_max_id + m_min_id) / 2; - - // No error? Launch a new task to process that data. 
- size_t numEvents = m_loadSize[0]; - size_t startAt = m_loadStart[0]; - - // convert things to shared_arrays - boost::shared_array<uint32_t> event_id_shrd(m_event_id); - boost::shared_array<float> event_time_of_flight_shrd( - m_event_time_of_flight); - boost::shared_array<float> event_weight_shrd(m_event_weight); - boost::shared_ptr<std::vector<uint64_t>> event_index_shrd(event_index_ptr); - - ProcessBankData *newTask1 = new ProcessBankData( - alg, entry_name, prog, event_id_shrd, event_time_of_flight_shrd, - numEvents, startAt, event_index_shrd, thisBankPulseTimes, m_have_weight, - event_weight_shrd, m_min_id, mid_id); - scheduler->push(newTask1); - if (alg->splitProcessing && (mid_id < m_max_id)) { - ProcessBankData *newTask2 = new ProcessBankData( - alg, entry_name, prog, event_id_shrd, event_time_of_flight_shrd, - numEvents, startAt, event_index_shrd, thisBankPulseTimes, - m_have_weight, event_weight_shrd, (mid_id + 1), m_max_id); - scheduler->push(newTask2); - } - } - - //--------------------------------------------------------------------------------------------------- - /** - * Interpret the value describing the number of events. If the number is - * positive return it unchanged. - * If the value is negative (can happen at ISIS) add 2^32 to it. - * @param size :: The size of events value. - */ - int64_t recalculateDataSize(const int64_t &size) { - if (size < 0) { - const int64_t shift = int64_t(1) << 32; - return shift + size; - } - return size; - } - -private: - /// Algorithm being run - LoadEventNexus *alg; - /// NXS path to bank - std::string entry_name; - /// NXS type - std::string entry_type; - /// Progress reporting - Progress *prog; - /// ThreadScheduler running this task - ThreadScheduler *scheduler; - /// Object with the pulse times for this bank - boost::shared_ptr<BankPulseTimes> thisBankPulseTimes; - /// Did we get an error in loading - bool m_loadError; - /// Old names in the file? 
- bool m_oldNexusFileNames; - /// Index to load start at in the file - std::vector<int> m_loadStart; - /// How much to load in the file - std::vector<int> m_loadSize; - /// Event pixel ID data - uint32_t *m_event_id; - /// Minimum pixel ID in this data - uint32_t m_min_id; - /// Maximum pixel ID in this data - uint32_t m_max_id; - /// TOF data - float *m_event_time_of_flight; - /// Flag for simulated data - bool m_have_weight; - /// Event weights - float *m_event_weight; - /// Frame period numbers - const std::vector<int> m_framePeriodNumbers; -}; // END-DEF-CLASS LoadBankFromDiskTask - -//=============================================================================================== -// LoadEventNexus -//=============================================================================================== - //---------------------------------------------------------------------------------------------- /** Empty default constructor */ LoadEventNexus::LoadEventNexus() - : IFileLoader<Kernel::NexusDescriptor>(), m_filename(), filter_tof_min(0), - filter_tof_max(0), m_specList(), m_specMin(0), m_specMax(0), - filter_time_start(), filter_time_stop(), chunk(0), totalChunks(0), - firstChunkForBank(0), eventsPerChunk(0), m_tofMutex(), longest_tof(0), - shortest_tof(0), bad_tofs(0), discarded_events(0), precount(false), - compressTolerance(0), eventVectors(), m_eventVectorMutex(), - eventid_max(0), pixelID_to_wi_vector(), pixelID_to_wi_offset(), - m_bankPulseTimes(), m_allBanksPulseTimes(), m_top_entry_name(), - m_file(nullptr), splitProcessing(false), m_haveWeights(false), - weightedEventVectors(), m_instrument_loaded_correctly(false), - loadlogs(false), m_logs_loaded_correctly(false), event_id_is_spec(false) { -} + : filter_tof_min(0), filter_tof_max(0), m_specMin(0), m_specMax(0), + longest_tof(0), shortest_tof(0), bad_tofs(0), discarded_events(0), + compressTolerance(0), m_file(nullptr), + m_instrument_loaded_correctly(false), loadlogs(false), + 
m_logs_loaded_correctly(false), event_id_is_spec(false) {} //---------------------------------------------------------------------------------------------- /** Destructor */ @@ -870,6 +308,12 @@ void LoadEventNexus::init() { declareProperty( make_unique<PropertyWithValue<bool>>("LoadLogs", true, Direction::Input), "Load the Sample/DAS logs from the file (default True)."); + +#ifdef MPI_EXPERIMENTAL + declareProperty(make_unique<PropertyWithValue<bool>>("UseParallelLoader", + true, Direction::Input), + "Use experimental parallel loader for loading event data."); +#endif } //---------------------------------------------------------------------------------------------- @@ -946,7 +390,6 @@ void LoadEventNexus::exec() { // Retrieve the filename from the properties m_filename = getPropertyValue("Filename"); - precount = getProperty("Precount"); compressTolerance = getProperty("CompressTolerance"); loadlogs = getProperty("LoadLogs"); @@ -1011,67 +454,7 @@ void LoadEventNexus::exec() { this->runLoadMonitors(); } } -} - -//----------------------------------------------------------------------------- -/** Generate a look-up table where the index = the pixel ID of an event -* and the value = a pointer to the EventList in the workspace -* @param vectors :: the array to create the map on -*/ -template <class T> -void LoadEventNexus::makeMapToEventLists(std::vector<std::vector<T>> &vectors) { - vectors.resize(m_ws->nPeriods()); - if (this->event_id_is_spec) { - // Find max spectrum no - Axis *ax1 = m_ws->getAxis(1); - specnum_t maxSpecNo = - -std::numeric_limits<specnum_t>::max(); // So that any number will be - // greater than this - for (size_t i = 0; i < ax1->length(); i++) { - specnum_t spec = ax1->spectraNo(i); - if (spec > maxSpecNo) - maxSpecNo = spec; - } - - // These are used by the bank loader to figure out where to put the events - // The index of eventVectors is a spectrum number so it is simply resized to - // the maximum - // possible spectrum number - eventid_max 
= maxSpecNo; - for (size_t i = 0; i < vectors.size(); ++i) { - vectors[i].resize(maxSpecNo + 1, nullptr); - } - for (size_t period = 0; period < m_ws->nPeriods(); ++period) { - for (size_t i = 0; i < m_ws->getNumberHistograms(); ++i) { - const auto &spec = m_ws->getSpectrum(i); - getEventsFrom(m_ws->getSpectrum(i, period), - vectors[period][spec.getSpectrumNo()]); - } - } - } else { - // To avoid going out of range in the vector, this is the MAX index that can - // go into it - eventid_max = static_cast<int32_t>(pixelID_to_wi_vector.size()) + - pixelID_to_wi_offset; - - // Make an array where index = pixel ID - // Set the value to NULL by default - for (size_t i = 0; i < vectors.size(); ++i) { - vectors[i].resize(eventid_max + 1, nullptr); - } - - for (size_t j = size_t(pixelID_to_wi_offset); - j < pixelID_to_wi_vector.size(); j++) { - size_t wi = pixelID_to_wi_vector[j]; - // Save a POINTER to the vector - if (wi < m_ws->getNumberHistograms()) { - for (size_t period = 0; period < m_ws->nPeriods(); ++period) { - getEventsFrom(m_ws->getSpectrum(wi, period), - vectors[period][j - pixelID_to_wi_offset]); - } - } - } - } + m_file->close(); } /** @@ -1120,21 +503,6 @@ std::size_t numEvents(::NeXus::File &file, bool &hasTotalCounts, return numEvents; } -void LoadEventNexus::createWorkspaceIndexMaps( - const bool monitors, const std::vector<std::string> &bankNames) { - // Create the required spectra mapping so that the workspace knows what to pad - // to - createSpectraMapping(m_filename, monitors, bankNames); - - // This map will be used to find the workspace index - if (this->event_id_is_spec) - pixelID_to_wi_vector = - m_ws->getSpectrumToWorkspaceIndexVector(pixelID_to_wi_offset); - else - pixelID_to_wi_vector = - m_ws->getDetectorIDToWorkspaceIndexVector(pixelID_to_wi_offset, true); -} - /** Load the instrument from the nexus file * * @param nexusfilename :: The name of the nexus file being loaded @@ -1310,7 +678,6 @@ void LoadEventNexus::loadEvents(API::Progress 
*const prog, // Make sure you have a non-NULL m_allBanksPulseTimes if (m_allBanksPulseTimes == nullptr) { std::vector<DateAndTime> temp; - // m_allBanksPulseTimes = new BankPulseTimes(temp); m_allBanksPulseTimes = boost::make_shared<BankPulseTimes>(temp); } @@ -1333,14 +700,13 @@ void LoadEventNexus::loadEvents(API::Progress *const prog, // Now we want to go through all the bankN_event entries vector<string> bankNames; vector<std::size_t> bankNumEvents; - size_t total_events = 0; map<string, string> entries = m_file->getEntries(); map<string, string>::const_iterator it = entries.begin(); std::string classType = monitors ? "NXmonitor" : "NXevent_data"; ::NeXus::Info info; bool oldNeXusFileNames(false); bool hasTotalCounts(true); - m_haveWeights = false; + bool haveWeights = false; for (; it != entries.end(); ++it) { std::string entry_name(it->first); std::string entry_class(it->second); @@ -1352,12 +718,11 @@ void LoadEventNexus::loadEvents(API::Progress *const prog, std::size_t num = numEvents(*m_file, hasTotalCounts, oldNeXusFileNames); bankNames.push_back(entry_name); bankNumEvents.push_back(num); - total_events += num; // Look for weights in simulated file try { m_file->openData("event_weight"); - m_haveWeights = true; + haveWeights = true; m_file->closeData(); } catch (::NeXus::Exception &) { // Swallow exception since flag is already false; @@ -1393,8 +758,6 @@ void LoadEventNexus::loadEvents(API::Progress *const prog, double filter_time_start_sec, filter_time_stop_sec; filter_time_start_sec = getProperty("FilterByTimeStart"); filter_time_stop_sec = getProperty("FilterByTimeStop"); - chunk = getProperty("ChunkNumber"); - totalChunks = getProperty("TotalChunks"); // Default to ALL pulse times bool is_time_filtered = false; @@ -1436,7 +799,7 @@ void LoadEventNexus::loadEvents(API::Progress *const prog, // Set the binning axis using this. 
m_ws->setAllX(axis); - createWorkspaceIndexMaps(monitors, std::vector<std::string>()); + createSpectraMapping(m_filename, monitors, std::vector<std::string>()); return; } @@ -1488,18 +851,7 @@ void LoadEventNexus::loadEvents(API::Progress *const prog, } } //----------------- Pad Empty Pixels ------------------------------- - createWorkspaceIndexMaps(monitors, someBanks); - - // Cache a map for speed. - if (!m_haveWeights) { - this->makeMapToEventLists<EventVector_pt>(eventVectors); - } else { - // Convert to weighted events - for (size_t i = 0; i < m_ws->getNumberHistograms(); i++) { - m_ws->getSpectrum(i).switchTo(API::WEIGHTED); - } - this->makeMapToEventLists<WeightedEventVector_pt>(weightedEventVectors); - } + createSpectraMapping(m_filename, monitors, someBanks); // Set all (empty) event lists as sorted by pulse time. That way, calling // SortEvents will not try to sort these empty lists. @@ -1511,96 +863,28 @@ void LoadEventNexus::loadEvents(API::Progress *const prog, static_cast<double>(std::numeric_limits<uint32_t>::max()) * 0.1; longest_tof = 0.; - // Make the thread pool - ThreadScheduler *scheduler = new ThreadSchedulerMutexes(); - ThreadPool pool(scheduler); - auto diskIOMutex = boost::make_shared<std::mutex>(); - size_t bank0 = 0; - size_t bankn = bankNames.size(); - - if (chunk != - EMPTY_INT()) // We are loading part - work out the bank number range - { - eventsPerChunk = total_events / totalChunks; - // Sort banks by size - size_t tmp; - string stmp; - for (size_t i = 0; i < bankn; i++) - for (size_t j = 0; j < bankn - 1; j++) - if (bankNumEvents[j] < bankNumEvents[j + 1]) { - tmp = bankNumEvents[j]; - bankNumEvents[j] = bankNumEvents[j + 1]; - bankNumEvents[j + 1] = tmp; - stmp = bankNames[j]; - bankNames[j] = bankNames[j + 1]; - bankNames[j + 1] = stmp; - } - int bigBanks = 0; - for (size_t i = 0; i < bankn; i++) - if (bankNumEvents[i] > eventsPerChunk) - bigBanks++; - // Each chunk is part of bank or multiple whole banks - // 0.5 for last chunk 
of a bank with multiple chunks - // 0.1 for multiple whole banks not completely filled - eventsPerChunk += - static_cast<size_t>((static_cast<double>(bigBanks) / - static_cast<double>(totalChunks) * 0.5 + - 0.05) * - static_cast<double>(eventsPerChunk)); - double partialChunk = 0.; - firstChunkForBank = 1; - for (int chunki = 1; chunki <= chunk; chunki++) { - if (partialChunk > 1.) { - partialChunk = 0.; - firstChunkForBank = chunki; - bank0 = bankn; - } - if (bankNumEvents[bank0] > 1) { - partialChunk += static_cast<double>(eventsPerChunk) / - static_cast<double>(bankNumEvents[bank0]); - } - if (chunki < totalChunks) - bankn = bank0 + 1; - else - bankn = bankNames.size(); - if (chunki == firstChunkForBank && partialChunk > 1.0) - bankn += static_cast<size_t>(partialChunk) - 1; - if (bankn > bankNames.size()) - bankn = bankNames.size(); - } - for (size_t i = bank0; i < bankn; i++) { - size_t start_event = (chunk - firstChunkForBank) * eventsPerChunk; - size_t stop_event = bankNumEvents[i]; - // Don't change stop_event for the final chunk - if (start_event + eventsPerChunk < stop_event) - stop_event = start_event + eventsPerChunk; - bankNumEvents[i] = stop_event - start_event; + bool loaded{false}; + if (canUseParallelLoader(haveWeights, oldNeXusFileNames, classType)) { + auto ws = m_ws->getSingleHeldWorkspace(); + m_file->close(); + try { + ParallelEventLoader::load(*ws, m_filename, m_top_entry_name, bankNames); + loaded = true; + shortest_tof = 0.0; + longest_tof = 1e10; + } catch (const std::runtime_error &) { } + safeOpenFile(m_filename); } - - // split banks up if the number of cores is more than twice the number of - // banks - splitProcessing = - bool(bankNames.size() * 2 < ThreadPool::getNumPhysicalCores()); - - // set up progress bar for the rest of the (multi-threaded) process - size_t numProg = bankNames.size() * (1 + 3); // 1 = disktask, 3 = proc task - if (splitProcessing) - numProg += bankNames.size() * 3; // 3 = second proc task - auto prog2 = 
make_unique<Progress>(this, 0.3, 1.0, numProg); - - const std::vector<int> periodLogVec = periodLog->valuesAsVector(); - - for (size_t i = bank0; i < bankn; i++) { - // We make tasks for loading - if (bankNumEvents[i] > 0) - pool.schedule(new LoadBankFromDiskTask( - this, bankNames[i], classType, bankNumEvents[i], oldNeXusFileNames, - prog2.get(), diskIOMutex, scheduler, periodLogVec)); + if (!loaded) { + bool precount = getProperty("Precount"); + int chunk = getProperty("ChunkNumber"); + int totalChunks = getProperty("TotalChunks"); + DefaultEventLoader::load(this, *m_ws, haveWeights, event_id_is_spec, + bankNames, periodLog->valuesAsVector(), classType, + bankNumEvents, oldNeXusFileNames, precount, chunk, + totalChunks); } - // Start and end all threads - pool.joinAll(); - diskIOMutex.reset(); // Info reporting const std::size_t eventsLoaded = m_ws->getNumberEvents(); @@ -1825,56 +1109,30 @@ void LoadEventNexus::deleteBanks(EventWorkspaceCollection_sptr workspace, void LoadEventNexus::createSpectraMapping( const std::string &nxsfile, const bool monitorsOnly, const std::vector<std::string> &bankNames) { - bool spectramap = false; - m_specMin = getProperty("SpectrumMin"); - m_specMax = getProperty("SpectrumMax"); - m_specList = getProperty("SpectrumList"); - - // set up the + LoadEventNexusIndexSetup indexSetup( + m_ws->getSingleHeldWorkspace(), getProperty("SpectrumMin"), + getProperty("SpectrumMax"), getProperty("SpectrumList"), communicator()); if (!monitorsOnly && !bankNames.empty()) { - std::vector<IDetector_const_sptr> allDets; - - for (const auto &bankName : bankNames) { - // Only build the map for the single bank - std::vector<IDetector_const_sptr> dets; - m_ws->getInstrument()->getDetectorsInBank(dets, bankName); - if (dets.empty()) - throw std::runtime_error("Could not find the bank named '" + bankName + - "' as a component assembly in the instrument " - "tree; or it did not contain any detectors." 
- " Try unchecking SingleBankPixelsOnly."); - allDets.insert(allDets.end(), dets.begin(), dets.end()); - } - if (!allDets.empty()) { - m_ws->resizeTo(allDets.size()); - // Make an event list for each. - for (size_t wi = 0; wi < allDets.size(); wi++) { - const detid_t detID = allDets[wi]->getID(); - m_ws->setDetectorIdsForAllPeriods(wi, detID); - } - spectramap = true; - g_log.debug() << "Populated spectra map for select banks\n"; + if (!isDefault("SpectrumMin") || !isDefault("SpectrumMax") || + !isDefault("SpectrumList")) + g_log.warning() << "Spectrum min/max/list selection ignored when " + "`SingleBankPixelsOnly` is enabled\n"; + m_ws->setIndexInfo(indexSetup.makeIndexInfo(bankNames)); + g_log.debug() << "Populated spectra map for select banks\n"; + } else if (auto mapping = loadISISVMSSpectraMapping(m_top_entry_name)) { + if (monitorsOnly) { + g_log.debug() << "Loading only monitor spectra from " << nxsfile << "\n"; + } else { + g_log.debug() << "Loading only detector spectra from " << nxsfile << "\n"; } - + m_ws->setIndexInfo(indexSetup.makeIndexInfo(*mapping, monitorsOnly)); } else { - spectramap = loadSpectraMapping(nxsfile, monitorsOnly, m_top_entry_name); - // Did we load one? 
If so then the event ID is the spectrum number and not - // det ID - if (spectramap) - this->event_id_is_spec = true; - } - - if (!spectramap) { g_log.debug() << "No custom spectra mapping found, continuing with default " "1:1 mapping of spectrum:detectorID\n"; - auto specList = m_ws->getInstrument()->getDetectorIDs(true); - createSpectraList(*std::min_element(specList.begin(), specList.end()), - *std::max_element(specList.begin(), specList.end())); - // The default 1:1 will suffice but exclude the monitors as they are always - // in a separate workspace - m_ws->padSpectra(m_specList); + m_ws->setIndexInfo(indexSetup.makeIndexInfo()); g_log.debug() << "Populated 1:1 spectra map for the whole instrument \n"; } + std::tie(m_specMin, m_specMax) = indexSetup.eventIDLimits(); } //----------------------------------------------------------------------------- @@ -2054,21 +1312,18 @@ void LoadEventNexus::runLoadMonitors() { * existence of * an isis_vms_compat block in the file, if it exists it pulls out the spectra * mapping listed there -* @param filename :: A filename -* @param monitorsOnly :: If true then only the monitor spectra are loaded * @param entry_name :: name of the NXentry to open. 
* @returns True if the mapping was loaded or false if the block does not exist */ -bool LoadEventNexus::loadSpectraMapping(const std::string &filename, - const bool monitorsOnly, - const std::string &entry_name) { +std::unique_ptr<std::pair<std::vector<int32_t>, std::vector<int32_t>>> +LoadEventNexus::loadISISVMSSpectraMapping(const std::string &entry_name) { const std::string vms_str = "/isis_vms_compat"; try { g_log.debug() << "Attempting to load custom spectra mapping from '" << entry_name << vms_str << "'.\n"; m_file->openPath("/" + entry_name + vms_str); } catch (::NeXus::Exception &) { - return false; // Doesn't exist + return nullptr; // Doesn't exist } // The ISIS spectrum mapping is defined by 2 arrays in isis_vms_compat block: @@ -2112,57 +1367,11 @@ bool LoadEventNexus::loadSpectraMapping(const std::string &filename, << ", SPEC=" << spec.size() << "\n"; throw std::runtime_error(os.str()); } - // Monitor filtering/selection - const std::vector<detid_t> monitors = m_ws->getInstrument()->getMonitors(); - const size_t nmons(monitors.size()); - if (monitorsOnly) { - g_log.debug() << "Loading only monitor spectra from " << filename << "\n"; - // Find the det_ids in the udet array. - m_ws->resizeTo(nmons); - for (size_t i = 0; i < nmons; ++i) { - // Find the index in the udet array - const detid_t &id = monitors[i]; - std::vector<int32_t>::const_iterator it = - std::find(udet.begin(), udet.end(), id); - if (it != udet.end()) { - const specnum_t &specNo = spec[it - udet.begin()]; - m_ws->setSpectrumNumberForAllPeriods(i, specNo); - m_ws->setDetectorIdsForAllPeriods(i, id); - } - } - } else { - g_log.debug() << "Loading only detector spectra from " << filename << "\n"; - - // If optional spectra are provided, if so, m_specList is initialized. 
spec - // is used if necessary - createSpectraList(*std::min_element(spec.begin(), spec.end()), - *std::max_element(spec.begin(), spec.end())); - - if (!m_specList.empty()) { - int i = 0; - std::vector<int32_t> spec_temp, udet_temp; - for (auto &element : spec) { - if (find(m_specList.begin(), m_specList.end(), element) != - m_specList.end()) // spec element *it is not in spec_list - { - spec_temp.push_back(element); - udet_temp.push_back(udet.at(i)); - } - i++; - } - spec = spec_temp; - udet = udet_temp; - } - - SpectrumDetectorMapping mapping(spec, udet, monitors); - m_ws->resizeTo(mapping.getMapping().size()); - // Make sure spectrum numbers are correct - auto uniqueSpectra = mapping.getSpectrumNumbers(); - m_ws->setSpectrumNumbersFromUniqueSpectra(uniqueSpectra); - // Fill detectors based on this mapping - m_ws->updateSpectraUsing(mapping); - } - return true; + // If mapping loaded the event ID is the spectrum number and not det ID + this->event_id_is_spec = true; + return Kernel::make_unique< + std::pair<std::vector<int32_t>, std::vector<int32_t>>>(std::move(spec), + std::move(udet)); } /** @@ -2438,80 +1647,6 @@ void LoadEventNexus::loadSampleDataISIScompatibility( file.closeGroup(); } -/** -* Check the validity of the optional spectrum range/list provided and identify -*if partial data should be loaded. 
-* -* @param min :: The minimum spectrum number read from file -* @param max :: The maximum spectrum number read from file -*/ - -void LoadEventNexus::createSpectraList(int32_t min, int32_t max) { - - // check if range [SpectrumMin, SpectrumMax] was supplied - if (m_specMin != EMPTY_INT() || m_specMax != EMPTY_INT()) { - if (m_specMax == EMPTY_INT()) { - m_specMax = max; - } - if (m_specMin == EMPTY_INT()) { - m_specMin = min; - } - - if (m_specMax > max) { - throw std::invalid_argument("Inconsistent range property: SpectrumMax is " - "larger than maximum spectrum found in " - "file."); - } - - // Sanity checks for min/max - if (m_specMin > m_specMax) { - throw std::invalid_argument("Inconsistent range property: SpectrumMin is " - "larger than SpectrumMax."); - } - - // Populate spec_list - for (int32_t i = m_specMin; i <= m_specMax; i++) - m_specList.push_back(i); - } else { - // Check if SpectrumList was supplied - - if (!m_specList.empty()) { - // Check no negative/zero numbers have been passed - auto itr = std::find_if(m_specList.begin(), m_specList.end(), - std::bind2nd(std::less<int32_t>(), 1)); - if (itr != m_specList.end()) { - throw std::invalid_argument( - "Negative/Zero SpectraList property encountered."); - } - - // Check range and set m_specMax to maximum value in m_specList - if ((m_specMax = - *std::max_element(m_specList.begin(), m_specList.end())) > - *std::max_element(m_specList.begin(), m_specList.end())) { - throw std::invalid_argument("Inconsistent range property: SpectrumMax " - "is larger than number of spectra."); - } - - // Set m_specMin to minimum value in m_specList - m_specMin = *std::min_element(m_specList.begin(), m_specList.end()); - } - } - - if (!m_specList.empty()) { - - // Check that spectra supplied by user do not correspond to monitors - auto nmonitors = m_ws->getInstrument()->getMonitors().size(); - - for (size_t i = 0; i < nmonitors; ++i) { - if (std::find(m_specList.begin(), m_specList.end(), i + 1) != - m_specList.end()) { - 
throw std::invalid_argument("Inconsistent range property: some of the " - "selected spectra correspond to monitors."); - } - } - } -} - /** * Makes sure that m_file is a valid and open NeXus::File object. * Throws if there is an exception opening the file. @@ -2534,5 +1669,48 @@ void LoadEventNexus::safeOpenFile(const std::string fname) { } } +/// The parallel loader currently has no support for a series of special cases, +/// as indicated by the return value of this method. +bool LoadEventNexus::canUseParallelLoader(const bool haveWeights, + const bool oldNeXusFileNames, + const std::string &classType) const { +#ifndef MPI_EXPERIMENTAL + // Actually the parallel loader would work also in non-MPI builds but it is + // likely to be slower than the default loader and may also exhibit unusual + // behavior for non-standard Nexus files. + return false; +#else + bool useParallelLoader = getProperty("UseParallelLoader"); + if (!useParallelLoader) + return false; +#endif + if (m_ws->nPeriods() != 1) + return false; + if (haveWeights) + return false; + if (event_id_is_spec) + return false; + if (oldNeXusFileNames) + return false; + if (filter_tof_min != -1e20 || filter_tof_max != 1e20) + return false; + if (filter_time_start != Types::Core::DateAndTime::minimum() || + filter_time_stop != Types::Core::DateAndTime::maximum()) + return false; + if (!isDefault("CompressTolerance") || !isDefault("SpectrumMin") || + !isDefault("SpectrumMax") || !isDefault("SpectrumList") || + !isDefault("ChunkNumber")) + return false; + if (classType != "NXevent_data") + return false; + return true; +} + +Parallel::ExecutionMode LoadEventNexus::getParallelExecutionMode( + const std::map<std::string, Parallel::StorageMode> &storageModes) const { + static_cast<void>(storageModes); + return Parallel::ExecutionMode::Distributed; +} + } // namespace DataHandling } // namespace Mantid diff --git a/Framework/DataHandling/src/LoadEventNexusIndexSetup.cpp 
b/Framework/DataHandling/src/LoadEventNexusIndexSetup.cpp new file mode 100644 index 0000000000000000000000000000000000000000..046ed078ab6d404824f116db8477c434e3e9212c --- /dev/null +++ b/Framework/DataHandling/src/LoadEventNexusIndexSetup.cpp @@ -0,0 +1,198 @@ +#include "MantidDataHandling/LoadEventNexusIndexSetup.h" +#include "MantidGeometry/Instrument.h" +#include "MantidGeometry/Instrument/ComponentInfo.h" +#include "MantidGeometry/Instrument/DetectorInfo.h" +#include "MantidAPI/SpectrumDetectorMapping.h" +#include "MantidIndexing/Extract.h" +#include "MantidIndexing/Scatter.h" +#include "MantidIndexing/SpectrumIndexSet.h" +#include "MantidIndexing/SpectrumNumber.h" +#include "MantidTypes/SpectrumDefinition.h" + +using namespace Mantid::API; +using namespace Mantid::Indexing; + +namespace Mantid { +namespace DataHandling { + +namespace { +void setupConsistentSpectrumNumbers(IndexInfo &filtered, + const std::vector<detid_t> &detIDs) { + std::vector<Indexing::SpectrumNumber> spectrumNumbers; + // Temporary spectrum number in `filtered` was detector ID, now translate + // to spectrum number, starting at 1. Note that we use detIDs and not + // DetectorInfo for translation since we need to match the unfiltered + // spectrum numbers, which are based on skipping monitors (which would be + // included in DetectorInfo). 
+ for (int32_t i = 0; i < static_cast<int32_t>(detIDs.size()); ++i) { + if (filtered.spectrumNumber(spectrumNumbers.size()) == detIDs[i]) + spectrumNumbers.push_back(i + 1); + if (filtered.size() == spectrumNumbers.size()) + break; + } + filtered.setSpectrumNumbers(std::move(spectrumNumbers)); +} +} + +LoadEventNexusIndexSetup::LoadEventNexusIndexSetup( + MatrixWorkspace_const_sptr instrumentWorkspace, const int32_t min, + const int32_t max, const std::vector<int32_t> range, + const Parallel::Communicator &communicator) + : m_instrumentWorkspace(instrumentWorkspace), m_min(min), m_max(max), + m_range(range), m_communicator(communicator) {} + +std::pair<int32_t, int32_t> LoadEventNexusIndexSetup::eventIDLimits() const { + return {m_min, m_max}; +} + +IndexInfo LoadEventNexusIndexSetup::makeIndexInfo() { + // The default 1:1 will suffice but exclude the monitors as they are always in + // a separate workspace + auto detIDs = m_instrumentWorkspace->getInstrument()->getDetectorIDs(true); + const auto &detectorInfo = m_instrumentWorkspace->detectorInfo(); + std::vector<SpectrumDefinition> specDefs; + for (const auto detID : detIDs) + specDefs.emplace_back(detectorInfo.indexOf(detID)); + // We need to filter based on detector IDs, but use IndexInfo for filtering + // for a unified filtering mechanism. Thus we set detector IDs as (temporary) + // spectrum numbers. + IndexInfo indexInfo(std::vector<SpectrumNumber>(detIDs.begin(), detIDs.end()), + Parallel::StorageMode::Cloned, m_communicator); + indexInfo.setSpectrumDefinitions(specDefs); + + auto filtered = filterIndexInfo(indexInfo); + + // Spectrum numbers are continuous and start at 1. If there is a filter, + // spectrum numbers are set up to be consistent with the unfiltered case. 
+ if (filtered.size() == indexInfo.size()) { + filtered.setSpectrumNumbers(1, static_cast<int32_t>(filtered.size())); + } else { + setupConsistentSpectrumNumbers(filtered, detIDs); + } + + return scatter(filtered); +} + +IndexInfo LoadEventNexusIndexSetup::makeIndexInfo( + const std::vector<std::string> &bankNames) { + const auto &componentInfo = m_instrumentWorkspace->componentInfo(); + const auto &detectorInfo = m_instrumentWorkspace->detectorInfo(); + std::vector<SpectrumDefinition> spectrumDefinitions; + // Temporary spectrum numbers setup up to be detector IDs, used for finding + // correct spectrum number to be consistent with unfiltered case. + std::vector<SpectrumNumber> spectrumNumbers; + const auto &instrument = m_instrumentWorkspace->getInstrument(); + for (const auto &bankName : bankNames) { + const auto &bank = instrument->getComponentByName(bankName); + std::vector<size_t> dets; + if (bank) { + const auto bankIndex = componentInfo.indexOf(bank->getComponentID()); + dets = componentInfo.detectorsInSubtree(bankIndex); + for (const auto detIndex : dets) { + spectrumDefinitions.emplace_back(detIndex); + spectrumNumbers.emplace_back(detectorInfo.detectorIDs()[detIndex]); + } + } + if (dets.empty()) + throw std::runtime_error("Could not find the bank named '" + bankName + + "' as a component assembly in the instrument " + "tree; or it did not contain any detectors. Try " + "unchecking SingleBankPixelsOnly."); + } + Indexing::IndexInfo indexInfo(std::move(spectrumNumbers), + Parallel::StorageMode::Cloned, m_communicator); + indexInfo.setSpectrumDefinitions(std::move(spectrumDefinitions)); + setupConsistentSpectrumNumbers(indexInfo, instrument->getDetectorIDs(true)); + // Filters are ignored when selecting bank names. Reset min/max to avoid + // unintended dropping of events in the loader. 
+ m_min = EMPTY_INT(); + m_max = EMPTY_INT(); + return scatter(indexInfo); +} + +IndexInfo LoadEventNexusIndexSetup::makeIndexInfo( + const std::pair<std::vector<int32_t>, std::vector<int32_t>> & + spectrumDetectorMapping, + const bool monitorsOnly) { + const auto &spec = spectrumDetectorMapping.first; + const auto &udet = spectrumDetectorMapping.second; + + const std::vector<detid_t> monitors = + m_instrumentWorkspace->getInstrument()->getMonitors(); + const auto &detectorInfo = m_instrumentWorkspace->detectorInfo(); + if (monitorsOnly) { + std::vector<Indexing::SpectrumNumber> spectrumNumbers; + std::vector<SpectrumDefinition> spectrumDefinitions; + // Find the det_ids in the udet array. + for (const auto id : monitors) { + // Find the index in the udet array + auto it = std::find(udet.begin(), udet.end(), id); + if (it != udet.end()) { + const specnum_t &specNo = spec[it - udet.begin()]; + spectrumNumbers.emplace_back(specNo); + spectrumDefinitions.emplace_back(detectorInfo.indexOf(id)); + } + } + Indexing::IndexInfo indexInfo( + spectrumNumbers, Parallel::StorageMode::Cloned, m_communicator); + indexInfo.setSpectrumDefinitions(std::move(spectrumDefinitions)); + return scatter(indexInfo); + } else { + SpectrumDetectorMapping mapping(spec, udet, monitors); + auto uniqueSpectra = mapping.getSpectrumNumbers(); + std::vector<SpectrumDefinition> spectrumDefinitions; + for (const auto spec : uniqueSpectra) { + spectrumDefinitions.emplace_back(); + for (const auto detID : mapping.getDetectorIDsForSpectrumNo(spec)) { + try { + spectrumDefinitions.back().add(detectorInfo.indexOf(detID)); + } catch (std::out_of_range &) { + // Discarding detector IDs that do not exist in the instrument. 
+ } + } + } + Indexing::IndexInfo indexInfo( + std::vector<Indexing::SpectrumNumber>(uniqueSpectra.begin(), + uniqueSpectra.end()), + Parallel::StorageMode::Cloned, m_communicator); + indexInfo.setSpectrumDefinitions(std::move(spectrumDefinitions)); + return scatter(filterIndexInfo(indexInfo)); + } +} + +/** Filter IndexInfo based on optional spectrum range/list provided. + * + * Checks the validity of user provided spectrum range/list. This method assumes + * that spectrum numbers in `indexInfo` argument are sorted and that the + * Parallel::StorageMode of `indexInfo` is `Cloned`. */ +IndexInfo +LoadEventNexusIndexSetup::filterIndexInfo(const IndexInfo &indexInfo) { + // Check if range [SpectrumMin, SpectrumMax] was supplied + if (m_min != EMPTY_INT() || m_max != EMPTY_INT()) { + if (m_max == EMPTY_INT()) + m_max = + static_cast<int32_t>(indexInfo.spectrumNumber(indexInfo.size() - 1)); + if (m_min == EMPTY_INT()) + m_min = static_cast<int32_t>(indexInfo.spectrumNumber(0)); + // Avoid adding non-existing indices (can happen if instrument has gaps in + // its detector IDs). IndexInfo does the filtering for use. 
+ const auto indices = indexInfo.makeIndexSet( + static_cast<SpectrumNumber>(m_min), static_cast<SpectrumNumber>(m_max)); + for (const auto &index : indices) + m_range.push_back(static_cast<int32_t>(indexInfo.spectrumNumber(index))); + } + // Check if SpectrumList was supplied (or filled via min/max above) + if (!m_range.empty()) { + std::sort(m_range.begin(), m_range.end()); + const auto indices = indexInfo.makeIndexSet( + std::vector<SpectrumNumber>(m_range.begin(), m_range.end())); + m_min = static_cast<int32_t>(indexInfo.spectrumNumber(*indices.begin())); + m_max = + static_cast<int32_t>(indexInfo.spectrumNumber(*(indices.end() - 1))); + return extract(indexInfo, indices); + } + return indexInfo; +} + +} // namespace DataHandling +} // namespace Mantid diff --git a/Framework/DataHandling/src/LoadISISNexus2.cpp b/Framework/DataHandling/src/LoadISISNexus2.cpp index 370fef1cfddfd85ff5f1e05cee7436f52b0cfdf9..9f3a54baf0b857cd1f84b789cc2f9cfc01f5e0eb 100644 --- a/Framework/DataHandling/src/LoadISISNexus2.cpp +++ b/Framework/DataHandling/src/LoadISISNexus2.cpp @@ -696,18 +696,6 @@ void LoadISISNexus2::buildSpectraInd2SpectraNumMap( } } -namespace { -/// Compare two spectra blocks for ordering -bool compareSpectraBlocks(const LoadISISNexus2::SpectraBlock &block1, - const LoadISISNexus2::SpectraBlock &block2) { - bool res = block1.last < block2.first; - if (!res) { - assert(block2.last < block1.first); - } - return res; -} -} - /** * Analyze the spectra ranges and prepare a list contiguous blocks. 
Each monitor * must be @@ -738,7 +726,11 @@ LoadISISNexus2::prepareSpectraBlocks(std::map<int64_t, std::string> &monitors, // sort and check for overlapping if (m_spectraBlocks.size() > 1) { std::sort(m_spectraBlocks.begin(), m_spectraBlocks.end(), - compareSpectraBlocks); + [](const LoadISISNexus2::SpectraBlock &block1, + const LoadISISNexus2::SpectraBlock &block2) { + return block1.last < block2.first; + }); + checkOverlappingSpectraRange(); } // Remove monitors that have been used. @@ -765,14 +757,34 @@ LoadISISNexus2::prepareSpectraBlocks(std::map<int64_t, std::string> &monitors, } /** -* Load a given period into the workspace -* @param period :: The period number to load (starting from 1) -* @param entry :: The opened root entry node for accessing the monitor and data -* nodes -* @param local_workspace :: The workspace to place the data in -* @param update_spectra2det_mapping :: reset spectra-detector map to the one -* calculated earlier. (Warning! -- this map has to be calculated correctly!) -*/ + * Check if any spectra block ranges overlap. + * + * Iterate over the sorted list of spectra blocks and check + * if the last element of the preceeding block is less than + * the first element of the next block. 
+ */ +void LoadISISNexus2::checkOverlappingSpectraRange() { + for (size_t i = 1; i < m_spectraBlocks.size(); ++i) { + const auto &block1 = m_spectraBlocks[i - 1]; + const auto &block2 = m_spectraBlocks[i]; + if (block1.first > block1.last && block2.first > block2.last) + throw std::runtime_error("LoadISISNexus2: inconsistent spectra ranges"); + if (block1.last >= block2.first) { + throw std::runtime_error( + "LoadISISNexus2: the range of SpectraBlocks must not overlap"); + } + } +} + +/** + * Load a given period into the workspace + * @param period :: The period number to load (starting from 1) + * @param entry :: The opened root entry node for accessing the monitor and data + * nodes + * @param local_workspace :: The workspace to place the data in + * @param update_spectra2det_mapping :: reset spectra-detector map to the one + * calculated earlier. (Warning! -- this map has to be calculated correctly!) + */ void LoadISISNexus2::loadPeriodData( int64_t period, NXEntry &entry, DataObjects::Workspace2D_sptr &local_workspace, diff --git a/Framework/DataHandling/src/LoadInstrument.cpp b/Framework/DataHandling/src/LoadInstrument.cpp index 2a33d40254e09bed681e7180e5abd99e53abad75..d5984cfeef441565b9434b619712647429b2992f 100644 --- a/Framework/DataHandling/src/LoadInstrument.cpp +++ b/Framework/DataHandling/src/LoadInstrument.cpp @@ -34,10 +34,6 @@ using namespace Geometry; std::recursive_mutex LoadInstrument::m_mutex; -/// Empty default constructor -LoadInstrument::LoadInstrument() : Algorithm() {} - -//------------------------------------------------------------------------------------------------------------------------------ /// Initialisation method. 
void LoadInstrument::init() { // When used as a Child Algorithm the workspace name is not used - hence the @@ -188,16 +184,17 @@ void LoadInstrument::exec() { // Add to data service for later retrieval InstrumentDataService::Instance().add(instrumentNameMangled, instrument); } - } - // Add the instrument to the workspace - m_workspace->setInstrument(instrument); + m_workspace->setInstrument(instrument); - // populate parameter map of workspace - m_workspace->populateInstrumentParameters(); + // populate parameter map of workspace + m_workspace->populateInstrumentParameters(); - // check if default parameter file is also present, unless loading from - if (!m_filename.empty()) - runLoadParameterFile(); + // LoadParameterFile modifies the base instrument stored in the IDS so this + // must also be protected by the lock until LoadParameterFile is fixed. + // check if default parameter file is also present, unless loading from + if (!m_filename.empty()) + runLoadParameterFile(); + } // Set the monitors output property setProperty("MonitorList", instrument->getMonitors()); diff --git a/Framework/DataHandling/src/LoadMLZ.cpp b/Framework/DataHandling/src/LoadMLZ.cpp index 23159b7e7906fa1d292d109799748df2adcff780..477d7a9dd996caca4538d356bbd4491ede76a7a8 100644 --- a/Framework/DataHandling/src/LoadMLZ.cpp +++ b/Framework/DataHandling/src/LoadMLZ.cpp @@ -302,7 +302,8 @@ void LoadMLZ::loadRunDetails(NXEntry &entry) { runDetails.addProperty("temperature", temperature, "K", true); } - runDetails.addProperty("monitor_counts", m_monitorCounts); + runDetails.addProperty("monitor_counts", + static_cast<double>(m_monitorCounts)); runDetails.addProperty("chopper_speed", m_chopper_speed); runDetails.addProperty("chopper_ratio", m_chopper_ratio); runDetails.addProperty("channel_width", m_channelWidth, "microseconds", true); diff --git a/Framework/DataHandling/src/LoadParameterFile.cpp b/Framework/DataHandling/src/LoadParameterFile.cpp index 
886831c1c654418bd0c5ecfe84f066b943662f3c..08963e98fb1d447ff65f89fd2c0c01c4c7208a53 100644 --- a/Framework/DataHandling/src/LoadParameterFile.cpp +++ b/Framework/DataHandling/src/LoadParameterFile.cpp @@ -32,9 +32,6 @@ using namespace API; using Geometry::Instrument; using Geometry::Instrument_sptr; -/// Empty default constructor -LoadParameterFile::LoadParameterFile() : Algorithm() {} - /// Initialisation method. void LoadParameterFile::init() { // When used as a Child Algorithm the workspace name is not used - hence the diff --git a/Framework/DataHandling/src/MaskDetectors.cpp b/Framework/DataHandling/src/MaskDetectors.cpp index bf34627a16294e9b84014a9637336f999646ae16..09d2823db8df68e428e4558bc3ca8b8ee2ae84bc 100644 --- a/Framework/DataHandling/src/MaskDetectors.cpp +++ b/Framework/DataHandling/src/MaskDetectors.cpp @@ -11,6 +11,7 @@ #include "MantidKernel/ArrayProperty.h" #include "MantidKernel/BoundedValidator.h" #include "MantidKernel/EnabledWhenProperty.h" +#include <algorithm> #include <numeric> #include <set> @@ -170,7 +171,8 @@ void MaskDetectors::exec() { // appropriate spectra number and adding the indices they are linked to the // list to be processed if (!spectraList.empty()) { - fillIndexListFromSpectra(indexList, spectraList, WS, ranges_info); + fillIndexListFromSpectra(indexList, std::move(spectraList), WS, + ranges_info); } // End dealing with spectraList if (!detectorList.empty()) { // Convert from detectors to workspace indexes @@ -451,7 +453,7 @@ void MaskDetectors::execPeaks(PeaksWorkspace_sptr WS) { */ void MaskDetectors::fillIndexListFromSpectra( std::vector<size_t> &indexList, - const std::vector<Indexing::SpectrumNumber> &spectraList, + std::vector<Indexing::SpectrumNumber> spectraList, const API::MatrixWorkspace_sptr WS, const std::tuple<size_t, size_t, bool> &range_info) { @@ -466,6 +468,12 @@ void MaskDetectors::fillIndexListFromSpectra( tmp_index.swap(indexList); } + // Ignore duplicate entries. 
+ std::sort(spectraList.begin(), spectraList.end()); + auto last = std::unique(spectraList.begin(), spectraList.end()); + if (last != spectraList.end()) + g_log.warning("Duplicate entries in spectrum list."); + spectraList.erase(last, spectraList.end()); for (auto ws_index : WS->indexInfo().makeIndexSet(spectraList)) { if (range_constrained && (ws_index < startIndex || ws_index > endIndex)) { continue; diff --git a/Framework/DataHandling/src/ParallelEventLoader.cpp b/Framework/DataHandling/src/ParallelEventLoader.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b218ec669da520657e582aea68e85971ffa10694 --- /dev/null +++ b/Framework/DataHandling/src/ParallelEventLoader.cpp @@ -0,0 +1,63 @@ +#include "MantidDataHandling/ParallelEventLoader.h" +#include "MantidDataObjects/EventWorkspace.h" +#include "MantidGeometry/Instrument/DetectorInfo.h" +#include "MantidIndexing/IndexInfo.h" +#include "MantidParallel/IO/EventLoader.h" +#include "MantidTypes/SpectrumDefinition.h" +#include "MantidTypes/Event/TofEvent.h" + +namespace Mantid { +namespace DataHandling { + +/// Return offset between global spectrum index and detector ID for given banks. +std::vector<int32_t> bankOffsets(const API::ExperimentInfo &ws, + const std::string &filename, + const std::string &groupName, + const std::vector<std::string> &bankNames) { + // Read an event ID for each bank. This is always a detector ID since the + // parallel loader is disabled otherwise. It is assumed that detector IDs + // within a bank are contiguous. + const auto &idToBank = Parallel::IO::EventLoader::makeAnyEventIdToBankMap( + filename, groupName, bankNames); + + const auto &detInfo = ws.detectorInfo(); + const auto &detIds = detInfo.detectorIDs(); + int32_t spectrumIndex{0}; // *global* index + std::vector<int32_t> bankOffsets(bankNames.size(), 0); + for (size_t i = 0; i < detInfo.size(); ++i) { + // Used only in LoadEventNexus so we know there is a 1:1 mapping, omitting + // monitors. 
+ if (!detInfo.isMonitor(i)) { + detid_t detId = detIds[i]; + // The offset is the difference between the event ID and the spectrum + // index and can then be used to translate from the former to the latter + // by simple subtraction. If no eventId could be read for a bank it + // implies that there are no events, so any offset will do since it is + // unused, keeping as initialized to 0 above. + if (idToBank.count(detId) == 1) { + size_t bank = idToBank.at(detId); + bankOffsets[bank] = detId - spectrumIndex; + } + spectrumIndex++; + } + } + return bankOffsets; +} + +/// Load events from given banks into given EventWorkspace. +void ParallelEventLoader::load(DataObjects::EventWorkspace &ws, + const std::string &filename, + const std::string &groupName, + const std::vector<std::string> &bankNames) { + const size_t size = ws.getNumberHistograms(); + std::vector<std::vector<Types::Event::TofEvent> *> eventLists(size, nullptr); + for (size_t i = 0; i < size; ++i) + DataObjects::getEventsFrom(ws.getSpectrum(i), eventLists[i]); + + Parallel::IO::EventLoader::load( + ws.indexInfo().communicator(), filename, groupName, bankNames, + bankOffsets(ws, filename, groupName, bankNames), std::move(eventLists)); +} + +} // namespace DataHandling +} // namespace Mantid diff --git a/Framework/DataHandling/src/ProcessBankData.cpp b/Framework/DataHandling/src/ProcessBankData.cpp index 3e8d963b7814147fd6fd71a3a5c87f170bab54cd..5b93092bf1265be928b3de789fe5dd286536e513 100644 --- a/Framework/DataHandling/src/ProcessBankData.cpp +++ b/Framework/DataHandling/src/ProcessBankData.cpp @@ -1,3 +1,5 @@ +#include "MantidDataHandling/DefaultEventLoader.h" +#include "MantidDataHandling/LoadEventNexus.h" #include "MantidDataHandling/ProcessBankData.h" using namespace Mantid::DataObjects; @@ -7,16 +9,16 @@ namespace Mantid { namespace DataHandling { ProcessBankData::ProcessBankData( - LoadEventNexus *alg, std::string entry_name, API::Progress *prog, + DefaultEventLoader &m_loader, std::string 
entry_name, API::Progress *prog, boost::shared_array<uint32_t> event_id, boost::shared_array<float> event_time_of_flight, size_t numEvents, size_t startAt, boost::shared_ptr<std::vector<uint64_t>> event_index, boost::shared_ptr<BankPulseTimes> thisBankPulseTimes, bool have_weight, boost::shared_array<float> event_weight, detid_t min_event_id, detid_t max_event_id) - : Task(), alg(alg), entry_name(entry_name), - pixelID_to_wi_vector(alg->pixelID_to_wi_vector), - pixelID_to_wi_offset(alg->pixelID_to_wi_offset), prog(prog), + : Task(), m_loader(m_loader), entry_name(entry_name), + pixelID_to_wi_vector(m_loader.pixelID_to_wi_vector), + pixelID_to_wi_offset(m_loader.pixelID_to_wi_offset), prog(prog), event_id(event_id), event_time_of_flight(event_time_of_flight), numEvents(numEvents), startAt(startAt), event_index(event_index), thisBankPulseTimes(thisBankPulseTimes), have_weight(have_weight), @@ -26,7 +28,6 @@ ProcessBankData::ProcessBankData( m_cost = static_cast<double>(numEvents); } -//---------------------------------------------------------------------------------------------- /** Run the data processing * FIXME/TODO - split run() into readable methods */ @@ -41,8 +42,9 @@ void ProcessBankData::run() { // override { prog->report(entry_name + ": precount"); // ---- Pre-counting events per pixel ID ---- - auto &outputWS = *(alg->m_ws); - if (alg->precount) { + auto &outputWS = m_loader.m_ws; + auto *alg = m_loader.alg; + if (m_loader.precount) { std::vector<size_t> counts(m_max_id - m_min_id + 1, 0); for (size_t i = 0; i < numEvents; i++) { @@ -155,8 +157,7 @@ void ProcessBankData::run() { // override { if (have_weight) { double weight = static_cast<double>(event_weight[i]); double errorSq = weight * weight; - LoadEventNexus::WeightedEventVector_pt eventVector = - alg->weightedEventVectors[periodIndex][detId]; + auto *eventVector = m_loader.weightedEventVectors[periodIndex][detId]; // NULL eventVector indicates a bad spectrum lookup if (eventVector) { 
eventVector->emplace_back(tof, pulsetime, weight, errorSq); @@ -165,8 +166,7 @@ void ProcessBankData::run() { // override { } } else { // We have cached the vector of events for this detector ID - std::vector<Mantid::Types::Event::TofEvent> *eventVector = - alg->eventVectors[periodIndex][detId]; + auto *eventVector = m_loader.eventVectors[periodIndex][detId]; // NULL eventVector indicates a bad spectrum lookup if (eventVector) { eventVector->emplace_back(tof, pulsetime); diff --git a/Framework/DataHandling/test/CMakeLists.txt b/Framework/DataHandling/test/CMakeLists.txt index 0cfd3daaa25d7827b86d9912c986514fbe33aa7b..21fa10afd2bd46ceffb066d5b47a2de27627f20c 100644 --- a/Framework/DataHandling/test/CMakeLists.txt +++ b/Framework/DataHandling/test/CMakeLists.txt @@ -12,6 +12,7 @@ if ( CXXTEST_FOUND ) ../../TestHelpers/src/TearDownWorld.cpp ../../TestHelpers/src/WorkspaceCreationHelper.cpp ../../TestHelpers/src/NexusTestHelper.cpp + ../../TestHelpers/src/ParallelRunner.cpp NXcanSASTestHelper.cpp ) diff --git a/Framework/DataHandling/test/EventWorkspaceCollectionTest.h b/Framework/DataHandling/test/EventWorkspaceCollectionTest.h index d5fd5fafa8328c01620d8015cf71433afd946e7e..e3c7e461e0a5c30935c170152af20e29d9b204d6 100644 --- a/Framework/DataHandling/test/EventWorkspaceCollectionTest.h +++ b/Framework/DataHandling/test/EventWorkspaceCollectionTest.h @@ -12,7 +12,9 @@ #include "MantidDataObjects/EventWorkspace.h" #include "MantidAPI/Sample.h" #include "MantidAPI/WorkspaceGroup.h" +#include "MantidIndexing/IndexInfo.h" +using namespace Mantid; using namespace Mantid::DataHandling; using namespace Mantid::DataObjects; using namespace Mantid::API; @@ -143,6 +145,28 @@ public: memberWS->sample().getWidth()); } } + + void test_setIndexInfo() { + EventWorkspaceCollection collection; + auto periodLog = make_unique<const TimeSeriesProperty<int>>("period_log"); + const size_t periods = 2; + collection.setNPeriods(periods, periodLog); + // Set some arbitrary data to ensure that 
it is preserved. + const float thickness = static_cast<float>(1.23); + collection.setThickness(thickness); + + collection.setIndexInfo(Indexing::IndexInfo({3, 1, 2})); + const auto ws = boost::dynamic_pointer_cast<WorkspaceGroup>( + collection.combinedWorkspace()); + for (size_t i = 0; i < periods; ++i) { + auto eventWS = + boost::dynamic_pointer_cast<EventWorkspace>(ws->getItem(i)); + TS_ASSERT_EQUALS(eventWS->getSpectrum(0).getSpectrumNo(), 3); + TS_ASSERT_EQUALS(eventWS->getSpectrum(1).getSpectrumNo(), 1); + TS_ASSERT_EQUALS(eventWS->getSpectrum(2).getSpectrumNo(), 2); + TS_ASSERT_EQUALS(eventWS->sample().getThickness(), thickness); + } + } }; #endif /* MANTID_DATAHANDLING_EventWorkspaceCollectionTEST_H_ */ diff --git a/Framework/DataHandling/test/LoadEventNexusIndexSetupTest.h b/Framework/DataHandling/test/LoadEventNexusIndexSetupTest.h new file mode 100644 index 0000000000000000000000000000000000000000..59e54957b962eee40e01422fb67c88b6a5357e7b --- /dev/null +++ b/Framework/DataHandling/test/LoadEventNexusIndexSetupTest.h @@ -0,0 +1,365 @@ +#ifndef MANTID_DATAHANDLING_LOADEVENTNEXUSINDEXSETUPTEST_H_ +#define MANTID_DATAHANDLING_LOADEVENTNEXUSINDEXSETUPTEST_H_ + +#include <cxxtest/TestSuite.h> +#include "MantidTestHelpers/FakeObjects.h" + +#include "MantidDataHandling/LoadEventNexusIndexSetup.h" +#include "MantidDataObjects/WorkspaceCreation.h" +#include "MantidGeometry/Instrument.h" +#include "MantidGeometry/Instrument/Detector.h" +#include "MantidIndexing/SpectrumNumber.h" +#include "MantidTypes/SpectrumDefinition.h" + +using namespace Mantid; +using namespace API; +using namespace Geometry; +using namespace DataObjects; +using namespace DataHandling; +using namespace Indexing; + +class LoadEventNexusIndexSetupTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static LoadEventNexusIndexSetupTest *createSuite() { + 
return new LoadEventNexusIndexSetupTest(); + } + static void destroySuite(LoadEventNexusIndexSetupTest *suite) { + delete suite; + } + + LoadEventNexusIndexSetupTest() { + auto instrument = boost::make_shared<Instrument>(); + // Create instrument with gap in detector ID range + for (auto detID : {1, 2, 11, 12}) { + auto *det = new Detector("det-" + std::to_string(detID), detID, nullptr); + instrument->add(det); + instrument->markAsDetector(det); + } + auto *mon = new Detector("monitor", 666, nullptr); + instrument->add(mon); + instrument->markAsMonitor(mon); + m_ws = create<WorkspaceTester>(instrument, 1, HistogramData::BinEdges(2)); + } + + void test_construct() { + LoadEventNexusIndexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {}); + } + + void test_makeIndexInfo_no_filter() { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {}); + const auto indexInfo = indexSetup.makeIndexInfo(); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, EMPTY_INT()); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, EMPTY_INT()); + TS_ASSERT_EQUALS(indexInfo.size(), 4); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(1)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(2)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(2), SpectrumNumber(3)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(3), SpectrumNumber(4)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(0)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(1)); + TS_ASSERT_EQUALS(specDefs->at(2), SpectrumDefinition(2)); + TS_ASSERT_EQUALS(specDefs->at(3), SpectrumDefinition(3)); + } + + void test_makeIndexInfo_min_out_of_range() { + for (const auto min : {0, 3, 13}) { + LoadEventNexusIndexSetup indexSetup(m_ws, min, EMPTY_INT(), {}); + TS_ASSERT_THROWS(indexSetup.makeIndexInfo(), std::out_of_range); + } + } + + void test_makeIndexInfo_max_out_of_range() { + for (const auto max : {0, 3, 13}) { + 
LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), max, {}); + TS_ASSERT_THROWS(indexSetup.makeIndexInfo(), std::out_of_range); + } + } + + void test_makeIndexInfo_range_out_of_range() { + for (const auto i : {0, 3, 13}) { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {i}); + TS_ASSERT_THROWS(indexSetup.makeIndexInfo(), std::out_of_range); + } + } + + void test_makeIndexInfo_range_includes_monitor() { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {666}); + TS_ASSERT_THROWS(indexSetup.makeIndexInfo(), std::out_of_range); + } + + void test_makeIndexInfo_min() { + LoadEventNexusIndexSetup indexSetup(m_ws, 11, EMPTY_INT(), {}); + const auto indexInfo = indexSetup.makeIndexInfo(); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 11); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 12); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(3)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(4)); + const auto specDefs = indexInfo.spectrumDefinitions(); + // Old behavior would have given detector indices 1 and 2 (instead of 2 and + // 3), mapping to detector IDs 2 and 11, instead of the requested 11 and 12. + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(2)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(3)); + } + + void test_makeIndexInfo_min_crossing_gap() { + LoadEventNexusIndexSetup indexSetup(m_ws, 2, EMPTY_INT(), {}); + const auto indexInfo = indexSetup.makeIndexInfo(); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 2); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 12); + // Note that we are NOT creating spectra for the gap between IDs 2 and 11, + // contrary to the behavior of the old index setup code. 
+ TS_ASSERT_EQUALS(indexInfo.size(), 3); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(2)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(3)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(2), SpectrumNumber(4)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(1)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(2)); + TS_ASSERT_EQUALS(specDefs->at(2), SpectrumDefinition(3)); + } + + void test_makeIndexInfo_min_max() { + LoadEventNexusIndexSetup indexSetup(m_ws, 2, 11, {}); + const auto indexInfo = indexSetup.makeIndexInfo(); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 2); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 11); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(2)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(3)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(1)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(2)); + } + + void test_makeIndexInfo_range() { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), + {2, 11}); + const auto indexInfo = indexSetup.makeIndexInfo(); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 2); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 11); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(2)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(3)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(1)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(2)); + } + + void test_makeIndexInfo_min_range() { + LoadEventNexusIndexSetup indexSetup(m_ws, 11, EMPTY_INT(), {1}); + const auto indexInfo = indexSetup.makeIndexInfo(); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 1); + 
TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 12); + TS_ASSERT_EQUALS(indexInfo.size(), 3); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(1)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(3)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(2), SpectrumNumber(4)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(0)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(2)); + TS_ASSERT_EQUALS(specDefs->at(2), SpectrumDefinition(3)); + } + + void test_makeIndexInfo_min_max_range() { + LoadEventNexusIndexSetup indexSetup(m_ws, 2, 11, {1}); + const auto indexInfo = indexSetup.makeIndexInfo(); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 1); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 11); + TS_ASSERT_EQUALS(indexInfo.size(), 3); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(1)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(2)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(2), SpectrumNumber(3)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(0)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(1)); + TS_ASSERT_EQUALS(specDefs->at(2), SpectrumDefinition(2)); + } + + void test_makeIndexInfo_from_bank() { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {}); + const auto indexInfo = indexSetup.makeIndexInfo({"det-2", "det-12"}); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, EMPTY_INT()); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, EMPTY_INT()); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(2)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(4)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(1)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(3)); + } + + void 
test_makeIndexInfo_from_bank_filter_ignored() { + LoadEventNexusIndexSetup indexSetup(m_ws, 12, EMPTY_INT(), {1}); + // This variant ignores any filter in the index/workspace setup phase, + // consistent with old behavior. Note that a filter for min/max does however + // apply when loading actual events in ProcessBankData (range is still + // ignored though). + const auto indexInfo = indexSetup.makeIndexInfo({"det-2", "det-12"}); + // Filter ignored, make sure also limits are set correctly. + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, EMPTY_INT()); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, EMPTY_INT()); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(2)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(4)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(1)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(3)); + } + + void test_makeIndexInfo_from_isis_spec_udet() { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {}); + auto spec = {4, 3, 2, 1}; + auto udet = {2, 1, 12, 11}; + const auto indexInfo = indexSetup.makeIndexInfo({spec, udet}, false); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, EMPTY_INT()); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, EMPTY_INT()); + TS_ASSERT_EQUALS(indexInfo.size(), 4); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(1)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(2)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(2), SpectrumNumber(3)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(3), SpectrumNumber(4)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(2)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(3)); + TS_ASSERT_EQUALS(specDefs->at(2), SpectrumDefinition(0)); + TS_ASSERT_EQUALS(specDefs->at(3), SpectrumDefinition(1)); + } + + 
void test_makeIndexInfo_from_isis_spec_udet_grouped() { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {}); + auto spec = {1, 2, 1, 2}; + auto udet = {1, 2, 11, 12}; + const auto indexInfo = indexSetup.makeIndexInfo({spec, udet}, false); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, EMPTY_INT()); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, EMPTY_INT()); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(1)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(2)); + const auto specDefs = indexInfo.spectrumDefinitions(); + SpectrumDefinition group_1_11; + group_1_11.add(0); + group_1_11.add(2); + TS_ASSERT_EQUALS(specDefs->at(0), group_1_11); + SpectrumDefinition group_2_12; + group_2_12.add(1); + group_2_12.add(3); + TS_ASSERT_EQUALS(specDefs->at(1), group_2_12); + } + + void test_makeIndexInfo_from_isis_spec_udet_unknown_detector_ids() { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {}); + auto spec = {1, 2}; + auto udet = {1, 100}; + const auto indexInfo = indexSetup.makeIndexInfo({spec, udet}, false); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, EMPTY_INT()); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, EMPTY_INT()); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(1)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(2)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(0)); + // ID 100 does not exist so SpectrumDefinition is empty + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition()); + } + + void test_makeIndexInfo_from_isis_spec_udet_min() { + LoadEventNexusIndexSetup indexSetup(m_ws, 3, EMPTY_INT(), {}); + auto spec = {4, 3, 2, 1}; + auto udet = {2, 1, 12, 11}; + const auto indexInfo = indexSetup.makeIndexInfo({spec, udet}, false); + 
TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 3); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 4); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(3)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(4)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(0)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(1)); + } + + void test_makeIndexInfo_from_isis_spec_udet_min_max() { + LoadEventNexusIndexSetup indexSetup(m_ws, 2, 3, {}); + auto spec = {4, 3, 2, 1}; + auto udet = {2, 1, 12, 11}; + const auto indexInfo = indexSetup.makeIndexInfo({spec, udet}, false); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 2); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 3); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(2)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(3)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(3)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(0)); + } + + void test_makeIndexInfo_from_isis_spec_udet_range() { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {1}); + auto spec = {4, 3, 2, 1}; + auto udet = {2, 1, 12, 11}; + const auto indexInfo = indexSetup.makeIndexInfo({spec, udet}, false); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 1); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 1); + TS_ASSERT_EQUALS(indexInfo.size(), 1); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(1)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(2)); + } + + void test_makeIndexInfo_from_isis_spec_udet_min_max_range() { + LoadEventNexusIndexSetup indexSetup(m_ws, 2, 2, {1}); + auto spec = {4, 3, 2, 1}; + auto udet = {2, 1, 12, 11}; + const auto 
indexInfo = indexSetup.makeIndexInfo({spec, udet}, false); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 1); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 2); + TS_ASSERT_EQUALS(indexInfo.size(), 2); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(1)); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(1), SpectrumNumber(2)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(2)); + TS_ASSERT_EQUALS(specDefs->at(1), SpectrumDefinition(3)); + } + + void test_makeIndexInfo_from_isis_spec_udet_range_includes_monitor() { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {1}); + auto spec = {1}; + auto udet = {666}; + TS_ASSERT_THROWS(indexSetup.makeIndexInfo({spec, udet}, false), + std::out_of_range); + } + + void test_makeIndexInfo_from_isis_spec_udet_monitors() { + LoadEventNexusIndexSetup indexSetup(m_ws, EMPTY_INT(), EMPTY_INT(), {}); + auto spec = {1, 2, 3, 4, 5}; + auto udet = {1, 2, 11, 12, 666}; + const auto indexInfo = indexSetup.makeIndexInfo({spec, udet}, true); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, EMPTY_INT()); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, EMPTY_INT()); + TS_ASSERT_EQUALS(indexInfo.size(), 1); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(5)); + const auto specDefs = indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(4)); + } + + void test_makeIndexInfo_from_isis_spec_udet_monitors_ignores_min_max_range() { + LoadEventNexusIndexSetup indexSetup(m_ws, 2, 3, {4}); + auto spec = {1, 2, 3, 4, 5}; + auto udet = {1, 2, 11, 12, 666}; + const auto indexInfo = indexSetup.makeIndexInfo({spec, udet}, true); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().first, 2); + TS_ASSERT_EQUALS(indexSetup.eventIDLimits().second, 3); + TS_ASSERT_EQUALS(indexInfo.size(), 1); + TS_ASSERT_EQUALS(indexInfo.spectrumNumber(0), SpectrumNumber(5)); + const auto specDefs = 
indexInfo.spectrumDefinitions(); + TS_ASSERT_EQUALS(specDefs->at(0), SpectrumDefinition(4)); + } + +private: + MatrixWorkspace_sptr m_ws; +}; + +#endif /* MANTID_DATAHANDLING_LOADEVENTNEXUSINDEXSETUPTEST_H_ */ diff --git a/Framework/DataHandling/test/LoadEventNexusTest.h b/Framework/DataHandling/test/LoadEventNexusTest.h index 13405636342955c389981aeaf3883fdf73ae7caa..026bc70a5c0c75112fb6c819033a7b6d3b2e2da0 100644 --- a/Framework/DataHandling/test/LoadEventNexusTest.h +++ b/Framework/DataHandling/test/LoadEventNexusTest.h @@ -12,8 +12,17 @@ #include "MantidKernel/Property.h" #include "MantidKernel/TimeSeriesProperty.h" #include "MantidDataHandling/LoadEventNexus.h" +#include "MantidIndexing/IndexInfo.h" +#include "MantidIndexing/SpectrumIndexSet.h" +#include "MantidIndexing/SpectrumNumber.h" +#include "MantidParallel/Collectives.h" +#include "MantidParallel/Communicator.h" +#include "MantidTestHelpers/ParallelAlgorithmCreation.h" +#include "MantidTestHelpers/ParallelRunner.h" + #include <cxxtest/TestSuite.h> +using namespace Mantid; using namespace Mantid::Geometry; using namespace Mantid::API; using namespace Mantid::DataObjects; @@ -22,6 +31,80 @@ using namespace Mantid::DataHandling; using Mantid::Types::Core::DateAndTime; using Mantid::Types::Event::TofEvent; +namespace { +boost::shared_ptr<const EventWorkspace> +load_reference_workspace(const std::string &filename) { + // Construct default communicator *without* threading backend. In a non-MPI run + // (such as when running unit tests) this will thus just be a communicator + // containing a single rank, independently on all ranks, which is what we want + // for default loading behavior. 
+ Parallel::Communicator comm; + auto alg = ParallelTestHelpers::create<LoadEventNexus>(comm); + alg->setProperty("Filename", filename); + alg->setProperty("LoadLogs", false); + TS_ASSERT_THROWS_NOTHING(alg->execute()); + TS_ASSERT(alg->isExecuted()); + Workspace_const_sptr out = alg->getProperty("OutputWorkspace"); + return boost::dynamic_pointer_cast<const EventWorkspace>(out); +} + +void run_MPI_load(const Parallel::Communicator &comm, + boost::shared_ptr<std::mutex> mutex) { + boost::shared_ptr<const EventWorkspace> reference; + boost::shared_ptr<const EventWorkspace> eventWS; + { + std::lock_guard<std::mutex> lock(*mutex); + const std::string filename("CNCS_7860_event.nxs"); + reference = load_reference_workspace(filename); + auto alg = ParallelTestHelpers::create<LoadEventNexus>(comm); + alg->setProperty("Filename", filename); + alg->setProperty("LoadLogs", false); + TS_ASSERT_THROWS_NOTHING(alg->execute()); + TS_ASSERT(alg->isExecuted()); + Workspace_const_sptr out = alg->getProperty("OutputWorkspace"); + if (comm.size() != 1) { + TS_ASSERT_EQUALS(out->storageMode(), Parallel::StorageMode::Distributed); + } + eventWS = boost::dynamic_pointer_cast<const EventWorkspace>(out); + } + const size_t localSize = eventWS->getNumberHistograms(); + auto localEventCount = eventWS->getNumberEvents(); + std::vector<size_t> localSizes; + std::vector<size_t> localEventCounts; + Parallel::gather(comm, localSize, localSizes, 0); + Parallel::gather(comm, localEventCount, localEventCounts, 0); + if (comm.rank() == 0) { + TS_ASSERT_EQUALS(std::accumulate(localSizes.begin(), localSizes.end(), + static_cast<size_t>(0)), + static_cast<size_t>(51200)); + TS_ASSERT_EQUALS(std::accumulate(localEventCounts.begin(), + localEventCounts.end(), + static_cast<size_t>(0)), + static_cast<size_t>(112266)); + } + + const auto &indexInfo = eventWS->indexInfo(); + size_t localCompared = 0; + for (size_t i = 0; i < reference->getNumberHistograms(); ++i) { + for (const auto &index : + 
indexInfo.makeIndexSet({static_cast<Indexing::SpectrumNumber>( + reference->getSpectrum(i).getSpectrumNo())})) { + TS_ASSERT_EQUALS(eventWS->getSpectrum(index), reference->getSpectrum(i)); + ++localCompared; + } + } + // Consistency check: Make sure we really compared all spectra (protects + // against missing spectrum numbers or inconsistent mapping in IndexInfo). + std::vector<size_t> compared; + Parallel::gather(comm, localCompared, compared, 0); + if (comm.rank() == 0) { + TS_ASSERT_EQUALS(std::accumulate(compared.begin(), compared.end(), + static_cast<size_t>(0)), + reference->getNumberHistograms()); + } +} +} + class LoadEventNexusTest : public CxxTest::TestSuite { private: void @@ -248,14 +331,12 @@ public: TSM_ASSERT("The number of spectra in the workspace should be equal to the " "spectra filtered", outWs->getNumberHistograms() == specList.size()); - TSM_ASSERT("Some spectra were not found in the workspace", - outWs->getSpectrum(0).getSpectrumNo() == 13); - TSM_ASSERT("Some spectra were not found in the workspace", - outWs->getSpectrum(1).getSpectrumNo() == 16); - TSM_ASSERT("Some spectra were not found in the workspace", - outWs->getSpectrum(2).getSpectrumNo() == 21); - TSM_ASSERT("Some spectra were not found in the workspace", - outWs->getSpectrum(3).getSpectrumNo() == 28); + // Spectrum numbers match those that same detector would have in unfiltered + // load, in this case detID + 1 since IDs in instrument start at 0. 
+ TS_ASSERT_EQUALS(outWs->getSpectrum(0).getSpectrumNo(), 14); + TS_ASSERT_EQUALS(outWs->getSpectrum(1).getSpectrumNo(), 17); + TS_ASSERT_EQUALS(outWs->getSpectrum(2).getSpectrumNo(), 22); + TS_ASSERT_EQUALS(outWs->getSpectrum(3).getSpectrumNo(), 29); // B) test SpectrumMin and SpectrumMax wsName = "test_partial_spectra_loading_SpectrumMin_SpectrumMax"; @@ -276,9 +357,11 @@ public: // check number and indices of spectra const size_t numSpecs = specMax - specMin + 1; TS_ASSERT_EQUALS(outWs->getNumberHistograms(), numSpecs); + // Spectrum numbers match those that same detector would have in unfiltered + // load, in this case detID + 1 since IDs in instrument start at 0. for (size_t specIdx = 0; specIdx < numSpecs; specIdx++) { TS_ASSERT_EQUALS(outWs->getSpectrum(specIdx).getSpectrumNo(), - static_cast<int>(specMin + specIdx)); + static_cast<int>(specMin + specIdx + 1)); } // C) test SpectrumList + SpectrumMin and SpectrumMax @@ -312,12 +395,14 @@ public: // check number and indices of spectra const size_t n = sMax - sMin + 1; // this n is the 20...22, excluding '17' TS_ASSERT_EQUALS(outWs->getNumberHistograms(), n + 1); // +1 is the '17' - // 17 should come from SpectrumList - TS_ASSERT_EQUALS(outWs->getSpectrum(0).getSpectrumNo(), 17); + // Spectrum numbers match those that same detector would have in unfiltered + // load, in this case detID + 1 since IDs in instrument start at 0. 
+ // 18 should come from SpectrumList + TS_ASSERT_EQUALS(outWs->getSpectrum(0).getSpectrumNo(), 18); // and then sMin(20)...sMax(22) for (size_t specIdx = 0; specIdx < n; specIdx++) { TS_ASSERT_EQUALS(outWs->getSpectrum(specIdx + 1).getSpectrumNo(), - static_cast<int>(sMin + specIdx)); + static_cast<int>(sMin + specIdx + 1)); } } @@ -359,8 +444,8 @@ public: auto outWs2 = AnalysisDataService::Instance().retrieveWS<EventWorkspace>(wsName2); - TSM_ASSERT("The number of spectra in the workspace should be 12", - outWs->getNumberHistograms() == 12); + TSM_ASSERT_EQUALS("The number of spectra in the workspace should be 12", + outWs->getNumberHistograms(), 12); TSM_ASSERT_EQUALS("The number of events in the precount and not precount " "workspaces do not match", @@ -750,6 +835,15 @@ public: } } + void test_MPI_load() { + int threads = 3; // Limited number of threads to avoid long running test. + ParallelTestHelpers::ParallelRunner runner(threads); + // Test reads from multiple threads, which is not supported by our HDF5 + // libraries, so we need a mutex. 
+ auto hdf5Mutex = boost::make_shared<std::mutex>(); + runner.run(run_MPI_load, hdf5Mutex); + } + private: std::string wsSpecFilterAndEventMonitors; }; diff --git a/Framework/DataObjects/inc/MantidDataObjects/EventList.h b/Framework/DataObjects/inc/MantidDataObjects/EventList.h index 8468f490acd264b76f304bfc51a624c197977e58..a7a5e3e49c345d37294e7dce90d3d68149aaac6f 100644 --- a/Framework/DataObjects/inc/MantidDataObjects/EventList.h +++ b/Framework/DataObjects/inc/MantidDataObjects/EventList.h @@ -84,6 +84,8 @@ public: ~EventList() override; + void copyDataFrom(const ISpectrum &source) override; + void createFromHistogram(const ISpectrum *inSpec, bool GenerateZeros, bool GenerateMultipleEvents, int MaxEventsPerBin); @@ -362,6 +364,10 @@ protected: void checkIsYAndEWritable() const override; private: + using ISpectrum::copyDataInto; + void copyDataInto(EventList &sink) const override; + void copyDataInto(Histogram1D &sink) const override; + const HistogramData::Histogram &histogramRef() const override { return m_histogram; } diff --git a/Framework/DataObjects/inc/MantidDataObjects/Histogram1D.h b/Framework/DataObjects/inc/MantidDataObjects/Histogram1D.h index 4537c851f9e2bca1fd85f92fe6b39000847d714d..dad0efa1b9181ebca2a2d05a5121ed5ad816799b 100644 --- a/Framework/DataObjects/inc/MantidDataObjects/Histogram1D.h +++ b/Framework/DataObjects/inc/MantidDataObjects/Histogram1D.h @@ -48,6 +48,8 @@ public: Histogram1D &operator=(Histogram1D &&) = default; Histogram1D &operator=(const ISpectrum &rhs); + void copyDataFrom(const ISpectrum &source) override; + void setX(const Kernel::cow_ptr<HistogramData::HistogramX> &X) override; MantidVec &dataX() override; const MantidVec &dataX() const override; @@ -85,6 +87,9 @@ public: } private: + using ISpectrum::copyDataInto; + void copyDataInto(Histogram1D &sink) const override; + void checkAndSanitizeHistogram(HistogramData::Histogram &histogram) override; const HistogramData::Histogram &histogramRef() const override { return 
m_histogram; diff --git a/Framework/DataObjects/inc/MantidDataObjects/WorkspaceCreation.h b/Framework/DataObjects/inc/MantidDataObjects/WorkspaceCreation.h index 7a3d895a65962c561b7996c1fcbf5202b00bcabf..a7e550fa8454130e57062d63118bb8809b7d332b 100644 --- a/Framework/DataObjects/inc/MantidDataObjects/WorkspaceCreation.h +++ b/Framework/DataObjects/inc/MantidDataObjects/WorkspaceCreation.h @@ -9,6 +9,9 @@ #include <type_traits> namespace Mantid { +namespace Indexing { +class IndexInfo; +} namespace Geometry { class Instrument; } @@ -142,9 +145,13 @@ MANTID_DATAOBJECTS_DLL void fixDistributionFlag(API::MatrixWorkspace &workspace, const HistogramData::Histogram &histArg); -MANTID_DATAOBJECTS_DLL void -initializeFromParent(const API::MatrixWorkspace &parent, - API::MatrixWorkspace &ws); +template <class T> struct IsIndexInfo { using type = std::false_type; }; +template <> struct IsIndexInfo<Indexing::IndexInfo> { + using type = std::true_type; +}; +template <class UseIndexInfo> +void initializeFromParent(const API::MatrixWorkspace &parent, + API::MatrixWorkspace &workspace); } /** This is the create() method that all the other create() methods call. @@ -181,7 +188,8 @@ std::unique_ptr<T> create(const P &parent, const IndexArg &indexArg, // future of WorkspaceFactory. ws->setInstrument(parent.getInstrument()); ws->initialize(indexArg, HistogramData::Histogram(histArg)); - detail::initializeFromParent(parent, *ws); + detail::initializeFromParent<typename detail::IsIndexInfo<IndexArg>::type>( + parent, *ws); // initializeFromParent sets the distribution flag to the same value as // parent. In case histArg is an actual Histogram that is not the correct // behavior so we have to set it back to the value given by histArg. 
diff --git a/Framework/DataObjects/src/EventList.cpp b/Framework/DataObjects/src/EventList.cpp index 917b57063dc0a12a74fd1c72b9ed9c7bd5f31a92..2ae4a697ad722d666c33a3dde492ef0a70dd4700 100644 --- a/Framework/DataObjects/src/EventList.cpp +++ b/Framework/DataObjects/src/EventList.cpp @@ -1,4 +1,5 @@ #include "MantidDataObjects/EventList.h" +#include "MantidDataObjects/Histogram1D.h" #include "MantidAPI/MatrixWorkspace.h" #include "MantidDataObjects/EventWorkspaceMRU.h" #include "MantidKernel/DateAndTime.h" @@ -190,6 +191,26 @@ EventList::~EventList() { // std::vector<TofEvent>().swap(events); //Trick to release the vector memory. } +/// Copy data from another EventList, via ISpectrum reference. +void EventList::copyDataFrom(const ISpectrum &source) { + source.copyDataInto(*this); +} + +/// Used by copyDataFrom for dynamic dispatch for its `source`. +void EventList::copyDataInto(EventList &sink) const { + sink.m_histogram = m_histogram; + sink.events = events; + sink.weightedEvents = weightedEvents; + sink.weightedEventsNoTime = weightedEventsNoTime; + sink.eventType = eventType; + sink.order = order; +} + +/// Used by Histogram1D::copyDataFrom for dynamic dispatch for `other`. +void EventList::copyDataInto(Histogram1D &sink) const { + sink.setHistogram(histogram()); +} + // -------------------------------------------------------------------------- /** Create an EventList from a histogram. This converts bins to weighted * events. 
diff --git a/Framework/DataObjects/src/EventWorkspace.cpp b/Framework/DataObjects/src/EventWorkspace.cpp index ddbe44ba71f4d9657f2041f26522bf4513fe5f83..f4e4007c187de2c092ec45e64c891f25bb7c0450 100644 --- a/Framework/DataObjects/src/EventWorkspace.cpp +++ b/Framework/DataObjects/src/EventWorkspace.cpp @@ -1,4 +1,5 @@ #include "MantidDataObjects/EventWorkspace.h" +#include "MantidAPI/Algorithm.h" #include "MantidAPI/ISpectrum.h" #include "MantidAPI/Progress.h" #include "MantidAPI/RefAxis.h" @@ -17,7 +18,6 @@ #include "MantidKernel/MultiThreaded.h" #include "MantidKernel/TimeSeriesProperty.h" -#include "MantidAPI/Algorithm.tcc" #include "tbb/parallel_for.h" #include <limits> #include <numeric> @@ -674,45 +674,6 @@ void EventWorkspace::getIntegratedSpectra(std::vector<double> &out, } // namespace DataObjects } // namespace Mantid -// Explicit Instantiations of IndexProperty Methods in Algorithm -namespace Mantid { -namespace API { -template DLLExport void -Algorithm::declareWorkspaceInputProperties<DataObjects::EventWorkspace>( - const std::string &propertyName, const int allowedIndexTypes, - PropertyMode::Type optional, LockMode::Type lock, const std::string &doc); - -template DLLExport void -Algorithm::setWorkspaceInputProperties<DataObjects::EventWorkspace, - std::vector<int>>( - const std::string &name, const DataObjects::EventWorkspace_sptr &wksp, - IndexType type, const std::vector<int> &list); - -template DLLExport void -Algorithm::setWorkspaceInputProperties<DataObjects::EventWorkspace, - std::string>( - const std::string &name, const DataObjects::EventWorkspace_sptr &wksp, - IndexType type, const std::string &list); - -template DLLExport void -Algorithm::setWorkspaceInputProperties<DataObjects::EventWorkspace, - std::vector<int>>( - const std::string &name, const std::string &wsName, IndexType type, - const std::vector<int> &list); - -template DLLExport void -Algorithm::setWorkspaceInputProperties<DataObjects::EventWorkspace, - std::string>(const std::string 
&name, - const std::string &wsName, - IndexType type, - const std::string &list); - -template DLLExport std::tuple<boost::shared_ptr<DataObjects::EventWorkspace>, - Indexing::SpectrumIndexSet> -Algorithm::getWorkspaceAndIndices(const std::string &name) const; -} // namespace API -} // namespace Mantid - namespace Mantid { namespace Kernel { template <> diff --git a/Framework/DataObjects/src/Histogram1D.cpp b/Framework/DataObjects/src/Histogram1D.cpp index 31971b661c9ad9d3f9f1f585b88aba6fb4fe43fd..12c2c710f4a3f9408f5b517baee96f24f618b867 100644 --- a/Framework/DataObjects/src/Histogram1D.cpp +++ b/Framework/DataObjects/src/Histogram1D.cpp @@ -31,6 +31,16 @@ Histogram1D &Histogram1D::operator=(const ISpectrum &rhs) { return *this; } +/// Copy data from a Histogram1D or EventList, via ISpectrum reference. +void Histogram1D::copyDataFrom(const ISpectrum &source) { + source.copyDataInto(*this); +} + +/// Used by copyDataFrom for dynamic dispatch for its `source`. +void Histogram1D::copyDataInto(Histogram1D &sink) const { + sink.m_histogram = m_histogram; +} + void Histogram1D::clearData() { MantidVec &yValues = this->dataY(); std::fill(yValues.begin(), yValues.end(), 0.0); diff --git a/Framework/DataObjects/src/WorkspaceCreation.cpp b/Framework/DataObjects/src/WorkspaceCreation.cpp index 92ecff01b8353a8025e330d6a02b29052576f128..3389c78de85144970ae8fcc09d4a5f15653826db 100644 --- a/Framework/DataObjects/src/WorkspaceCreation.cpp +++ b/Framework/DataObjects/src/WorkspaceCreation.cpp @@ -3,6 +3,7 @@ #include "MantidDataObjects/EventWorkspace.h" #include "MantidDataObjects/WorkspaceCreation.h" #include "MantidDataObjects/Workspace2D.h" +#include "MantidIndexing/IndexInfo.h" namespace Mantid { namespace DataObjects { @@ -34,18 +35,53 @@ template <> std::unique_ptr<API::HistoWorkspace> createConcreteHelper() { return {nullptr}; } +template <class UseIndexInfo> +void doInitializeFromParent(const API::MatrixWorkspace &parent, + API::MatrixWorkspace &workspace, + const bool 
differentSize) { + API::WorkspaceFactory::Instance().initializeFromParent(parent, workspace, + differentSize); +} + +/** Same as WorkspaceFactory::initializeFromParent, with modifications for + * changed IndexInfo. + * + * When IndexInfo is used for initialization, this implies that the following data from + * the parent is not applicable (since no automatic mapping is possible): + * - Bin masking + * - Spectrum numbers and detector ID grouping + * - Y axis + */ +template <> +void doInitializeFromParent<std::true_type>(const API::MatrixWorkspace &parent, + API::MatrixWorkspace &child, + const bool differentSize) { + // Ignore flag since with IndexInfo the size is the same but we nevertheless + // do not want to copy some data since spectrum order or definitions may have + // changed. This should take care of not copying bin masks and Y axis. + static_cast<void>(differentSize); + + const auto indexInfo = child.indexInfo(); + API::WorkspaceFactory::Instance().initializeFromParent(parent, child, true); + // Restore previously set IndexInfo of child, undoing changes to spectrum numbers + // and detector ID grouping that initializeFromParent does by default. This hack is + // not optimal performance-wise but copying data between workspaces is too + // complicated and dangerous currently without using initializeFromParent. + child.setIndexInfo(indexInfo); +} + +/** Initialize a MatrixWorkspace from its parent including instrument, unit, + * number of spectra and Run + * @brief initializeFromParent + * @param parent + * @param ws + */ +template <class UseIndexInfo> +void initializeFromParent(const API::MatrixWorkspace &parent, + API::MatrixWorkspace &ws) { + bool differentSize = (parent.x(0).size() != ws.x(0).size()) || + (parent.y(0).size() != ws.y(0).size()); + doInitializeFromParent<UseIndexInfo>(parent, ws, differentSize); + // For EventWorkspace, `ws.y(0)` puts entry 0 in the MRU. 
However, clients // would typically expect an empty MRU and fail to clear it. This dummy call // removes the entry from the MRU. @@ -58,6 +94,13 @@ void fixDistributionFlag(API::MatrixWorkspace &workspace, workspace.setDistribution(histArg.yMode() == HistogramData::Histogram::YMode::Frequencies); } + +template void MANTID_DATAOBJECTS_DLL +initializeFromParent<std::true_type>(const API::MatrixWorkspace &, + API::MatrixWorkspace &); +template void MANTID_DATAOBJECTS_DLL +initializeFromParent<std::false_type>(const API::MatrixWorkspace &, + API::MatrixWorkspace &); } } // namespace DataObjects } // namespace Mantid diff --git a/Framework/DataObjects/test/EventListTest.h b/Framework/DataObjects/test/EventListTest.h index 93b909c7efff214949b1ce4af59d0d3736aa8550..7ce3c4f4ad6ac77bf450c99d914adac205b2abbb 100644 --- a/Framework/DataObjects/test/EventListTest.h +++ b/Framework/DataObjects/test/EventListTest.h @@ -4,9 +4,11 @@ #include <cxxtest/TestSuite.h> #include "MantidDataObjects/EventList.h" #include "MantidDataObjects/EventWorkspace.h" +#include "MantidDataObjects/Histogram1D.h" #include "MantidKernel/Timer.h" #include "MantidKernel/CPUTimer.h" #include "MantidKernel/Unit.h" +#include "MantidKernel/make_unique.h" #include <boost/scoped_ptr.hpp> #include <cmath> @@ -55,6 +57,77 @@ public: el = EventList(mylist); } + void test_copyDataFrom() { + Histogram1D histogram{Histogram::XMode::Points, Histogram::YMode::Counts}; + histogram.setHistogram(Points(1), Counts(1)); + EventList eventList; + eventList.setHistogram(BinEdges{0.0, 2.0}); + eventList += TofEvent(1.0, 2); + std::unique_ptr<const ISpectrum> specHist = + Kernel::make_unique<Histogram1D>(histogram); + std::unique_ptr<const ISpectrum> specEvent = + Kernel::make_unique<EventList>(eventList); + std::unique_ptr<ISpectrum> target = make_unique<EventList>(); + + TS_ASSERT_THROWS_EQUALS(target->copyDataFrom(*specHist), + const std::runtime_error &e, std::string(e.what()), + "Incompatible types in 
ISpectrum::copyDataFrom"); + + TS_ASSERT_THROWS_NOTHING(target->copyDataFrom(*specEvent)); + TS_ASSERT(target->binEdges()); + TS_ASSERT_EQUALS(&target->binEdges()[0], &eventList.binEdges()[0]); + TS_ASSERT_EQUALS(target->counts()[0], 1.0); + } + + void test_copyDataFrom_does_not_copy_indices() { + EventList eventList; + eventList.setHistogram(BinEdges{0.0, 2.0}); + eventList += TofEvent(1.0, 2); + std::unique_ptr<const ISpectrum> specEvent = + Kernel::make_unique<EventList>(eventList); + std::unique_ptr<ISpectrum> target = make_unique<EventList>(); + target->setSpectrumNo(37); + target->setDetectorID(42); + + TS_ASSERT_THROWS_NOTHING(target->copyDataFrom(*specEvent)); + TS_ASSERT(target->binEdges()); + TS_ASSERT_EQUALS(&target->binEdges()[0], &eventList.binEdges()[0]); + TS_ASSERT_EQUALS(target->counts()[0], 1.0); + TS_ASSERT_EQUALS(target->getSpectrumNo(), 37); + TS_ASSERT_EQUALS(target->getDetectorIDs(), std::set<detid_t>{42}); + } + + void test_copyDataFrom_event_data_details() { + EventList eventList; + eventList.setHistogram(BinEdges{0.0, 2.0}); + eventList += TofEvent(1.0, 2); + EventList target; + + target.copyDataFrom(eventList); + TS_ASSERT_EQUALS(target.getEventType(), EventType::TOF) + TS_ASSERT_EQUALS(target.getSortType(), eventList.getSortType()); + TS_ASSERT_EQUALS(target.getEvents(), eventList.getEvents()); + TS_ASSERT_THROWS(target.getWeightedEvents(), std::runtime_error); + TS_ASSERT_THROWS(target.getWeightedEventsNoTime(), std::runtime_error); + + eventList.switchTo(EventType::WEIGHTED); + target.copyDataFrom(eventList); + TS_ASSERT_EQUALS(target.getEventType(), EventType::WEIGHTED) + TS_ASSERT_EQUALS(target.getSortType(), eventList.getSortType()); + TS_ASSERT_THROWS(target.getEvents(), std::runtime_error); + TS_ASSERT_EQUALS(target.getWeightedEvents(), eventList.getWeightedEvents()); + TS_ASSERT_THROWS(target.getWeightedEventsNoTime(), std::runtime_error); + + eventList.switchTo(EventType::WEIGHTED_NOTIME); + target.copyDataFrom(eventList); + 
TS_ASSERT_EQUALS(target.getEventType(), EventType::WEIGHTED_NOTIME) + TS_ASSERT_EQUALS(target.getSortType(), eventList.getSortType()); + TS_ASSERT_THROWS(target.getEvents(), std::runtime_error); + TS_ASSERT_THROWS(target.getWeightedEvents(), std::runtime_error); + TS_ASSERT_EQUALS(target.getWeightedEventsNoTime(), + eventList.getWeightedEventsNoTime()); + } + //================================================================================== //--- Basics ---- //================================================================================== diff --git a/Framework/DataObjects/test/Histogram1DTest.h b/Framework/DataObjects/test/Histogram1DTest.h index b8c764d32f5a609ab99c1184d56cefcc40a5f7ca..8caa93e411e8d49069426a362e5a2b377fc06c7f 100644 --- a/Framework/DataObjects/test/Histogram1DTest.h +++ b/Framework/DataObjects/test/Histogram1DTest.h @@ -7,12 +7,15 @@ #include <cxxtest/TestSuite.h> #include "MantidHistogramData/LinearGenerator.h" +#include "MantidDataObjects/EventList.h" #include "MantidDataObjects/Histogram1D.h" +#include "MantidKernel/make_unique.h" -using Mantid::DataObjects::Histogram1D; -using Mantid::MantidVec; -using Mantid::Kernel::make_cow; -using namespace Mantid::HistogramData; +using namespace Mantid; +using namespace API; +using namespace Kernel; +using namespace DataObjects; +using namespace HistogramData; class Histogram1DTest : public CxxTest::TestSuite { private: @@ -45,6 +48,54 @@ public: h2.setCountStandardDeviations(100); } + void test_copyDataFrom() { + Histogram1D histogram{Histogram::XMode::Points, Histogram::YMode::Counts}; + histogram.setHistogram(Points(1), Counts(1)); + EventList eventList; + eventList.setHistogram(BinEdges(2)); + std::unique_ptr<const ISpectrum> specHist = + Kernel::make_unique<Histogram1D>(histogram); + std::unique_ptr<const ISpectrum> specEvent = + Kernel::make_unique<EventList>(eventList); + std::unique_ptr<ISpectrum> target = make_unique<Histogram1D>( + Histogram::XMode::Points, Histogram::YMode::Counts); + + 
TS_ASSERT_THROWS_NOTHING(target->copyDataFrom(*specHist)); + TS_ASSERT(target->points()); + TS_ASSERT_EQUALS(&target->points()[0], &histogram.points()[0]); + + TS_ASSERT_THROWS_NOTHING(target->copyDataFrom(*specEvent)); + TS_ASSERT(target->binEdges()); + TS_ASSERT_EQUALS(&target->binEdges()[0], &eventList.binEdges()[0]); + } + + void test_copyDataFrom_does_not_copy_indices() { + Histogram1D histogram{Histogram::XMode::Points, Histogram::YMode::Counts}; + histogram.setHistogram(Points(1), Counts(1)); + EventList eventList; + eventList.setHistogram(BinEdges(2)); + std::unique_ptr<const ISpectrum> specHist = + Kernel::make_unique<Histogram1D>(histogram); + std::unique_ptr<const ISpectrum> specEvent = + Kernel::make_unique<EventList>(eventList); + std::unique_ptr<ISpectrum> target = make_unique<Histogram1D>( + Histogram::XMode::Points, Histogram::YMode::Counts); + target->setSpectrumNo(37); + target->setDetectorID(42); + + TS_ASSERT_THROWS_NOTHING(target->copyDataFrom(*specHist)); + TS_ASSERT(target->points()); + TS_ASSERT_EQUALS(&target->points()[0], &histogram.points()[0]); + TS_ASSERT_EQUALS(target->getSpectrumNo(), 37); + TS_ASSERT_EQUALS(target->getDetectorIDs(), std::set<detid_t>{42}); + + TS_ASSERT_THROWS_NOTHING(target->copyDataFrom(*specEvent)); + TS_ASSERT(target->binEdges()); + TS_ASSERT_EQUALS(&target->binEdges()[0], &eventList.binEdges()[0]); + TS_ASSERT_EQUALS(target->getSpectrumNo(), 37); + TS_ASSERT_EQUALS(target->getDetectorIDs(), std::set<detid_t>{42}); + } + void testcheckAndSanitizeHistogramThrowsNullY() { Histogram1D h{Histogram::XMode::Points, Histogram::YMode::Counts}; BinEdges edges{-0.04, 1.7}; diff --git a/Framework/DataObjects/test/WorkspaceCreationTest.h b/Framework/DataObjects/test/WorkspaceCreationTest.h index dccc28d59ee2daf4cd37e614521de4aaf4094033..e788ffe3c44daecf7dd6b6062b9a61c480b17f2d 100644 --- a/Framework/DataObjects/test/WorkspaceCreationTest.h +++ b/Framework/DataObjects/test/WorkspaceCreationTest.h @@ -41,6 +41,33 @@ void 
run_create_partitioned(const Parallel::Communicator &comm) { } } TS_ASSERT_EQUALS(i.size(), expectedSize); + TS_ASSERT_EQUALS(ws->storageMode(), Parallel::StorageMode::Distributed); +} + +void run_create_partitioned_parent(const Parallel::Communicator &comm) { + IndexInfo indices(47, Parallel::StorageMode::Distributed, comm); + indices.setSpectrumDefinitions( + std::vector<SpectrumDefinition>(indices.size())); + const auto parent = + create<Workspace2D>(indices, Histogram(BinEdges{1, 2, 4})); + const auto ws = create<MatrixWorkspace>(*parent); + const auto &i = ws->indexInfo(); + TS_ASSERT_EQUALS(i.globalSize(), 47); + size_t expectedSize = 0; + for (size_t globalIndex = 0; globalIndex < i.globalSize(); ++globalIndex) { + // Current default is RoundRobinPartitioner + if (static_cast<int>(globalIndex) % comm.size() == comm.rank()) { + TS_ASSERT_EQUALS(i.spectrumNumber(expectedSize), + static_cast<int>(globalIndex) + 1); + ++expectedSize; + } + } + TS_ASSERT_EQUALS(parent->indexInfo().globalSize(), + ws->indexInfo().globalSize()); + TS_ASSERT_EQUALS(parent->indexInfo().size(), ws->indexInfo().size()); + TS_ASSERT_EQUALS(parent->getNumberHistograms(), ws->getNumberHistograms()); + TS_ASSERT_EQUALS(i.size(), expectedSize); + TS_ASSERT_EQUALS(ws->storageMode(), Parallel::StorageMode::Distributed); } void run_create_partitioned_with_instrument( @@ -334,12 +361,26 @@ public: check_zeroed_data(*ws); } - void test_create_parent_IndexInfo_same_size() { + void test_create_parent_same_size_does_not_ignore_IndexInfo_no_instrument() { const auto parent = create<Workspace2D>(2, Histogram(BinEdges{1, 2, 4})); const auto ws = create<Workspace2D>(*parent, make_indices_no_detectors(), parent->histogram(0)); - // If parent has same size, data in IndexInfo is ignored - check_default_indices(*ws); + // Even if parent has same size data in IndexInfo should not be ignored + // since it is given explicitly. 
+ check_indices_no_detectors(*ws); + check_zeroed_data(*ws); + } + + void test_create_parent_same_size_does_not_ignore_IndexInfo() { + auto parentIndices = make_indices(); + parentIndices.setSpectrumNumbers({666, 1}); + const auto parent = create<Workspace2D>(m_instrument, parentIndices, + Histogram(BinEdges{1, 2, 4})); + const auto ws = + create<Workspace2D>(*parent, make_indices(), parent->histogram(0)); + // Even if parent has same size data in IndexInfo should not be ignored + // since it is given explicitly. + check_indices(*ws); check_zeroed_data(*ws); } @@ -441,9 +482,10 @@ public: TS_ASSERT_EQUALS(ws->storageMode(), Parallel::StorageMode::Distributed); } - void test_create_partitioned() { - run_create_partitioned(Parallel::Communicator{}); - runParallel(run_create_partitioned); + void test_create_partitioned() { runParallel(run_create_partitioned); } + + void test_create_partitioned_parent() { + runParallel(run_create_partitioned_parent); } void test_create_partitioned_with_instrument() { diff --git a/Framework/HistogramData/CMakeLists.txt b/Framework/HistogramData/CMakeLists.txt index fc89554acad2331a6195c818991e32d6d3edac23..fdaf4a9d8f5e1079f25206c1768493673211e172 100644 --- a/Framework/HistogramData/CMakeLists.txt +++ b/Framework/HistogramData/CMakeLists.txt @@ -32,6 +32,8 @@ set ( INC_FILES inc/MantidHistogramData/HistogramBuilder.h inc/MantidHistogramData/HistogramDx.h inc/MantidHistogramData/HistogramE.h + inc/MantidHistogramData/HistogramItem.h + inc/MantidHistogramData/HistogramIterator.h inc/MantidHistogramData/HistogramMath.h inc/MantidHistogramData/HistogramX.h inc/MantidHistogramData/HistogramY.h @@ -69,6 +71,7 @@ set ( TEST_FILES HistogramBuilderTest.h HistogramDxTest.h HistogramETest.h + HistogramIteratorTest.h HistogramMathTest.h HistogramTest.h HistogramXTest.h diff --git a/Framework/HistogramData/inc/MantidHistogramData/Histogram.h b/Framework/HistogramData/inc/MantidHistogramData/Histogram.h index 
83e2b815d9056dc8bf748d0aec8b41ed81af93a7..0b3d1c4ccc5b9a5f2b223599ef9f0a0d947916a4 100644 --- a/Framework/HistogramData/inc/MantidHistogramData/Histogram.h +++ b/Framework/HistogramData/inc/MantidHistogramData/Histogram.h @@ -1,12 +1,11 @@ #ifndef MANTID_HISTOGRAMDATA_HISTOGRAM_H_ #define MANTID_HISTOGRAMDATA_HISTOGRAM_H_ -#include "MantidHistogramData/DllConfig.h" -#include "MantidKernel/cow_ptr.h" #include "MantidHistogramData/BinEdges.h" -#include "MantidHistogramData/Counts.h" #include "MantidHistogramData/CountStandardDeviations.h" #include "MantidHistogramData/CountVariances.h" +#include "MantidHistogramData/Counts.h" +#include "MantidHistogramData/DllConfig.h" #include "MantidHistogramData/Frequencies.h" #include "MantidHistogramData/FrequencyStandardDeviations.h" #include "MantidHistogramData/FrequencyVariances.h" @@ -14,15 +13,18 @@ #include "MantidHistogramData/HistogramE.h" #include "MantidHistogramData/HistogramX.h" #include "MantidHistogramData/HistogramY.h" -#include "MantidHistogramData/Points.h" #include "MantidHistogramData/PointStandardDeviations.h" #include "MantidHistogramData/PointVariances.h" +#include "MantidHistogramData/Points.h" +#include "MantidKernel/cow_ptr.h" #include <vector> namespace Mantid { namespace HistogramData { +class HistogramIterator; + /** Histogram Histogram is a container for objects that together represent a histogram. 
In @@ -190,6 +192,9 @@ public: void convertToCounts(); void convertToFrequencies(); + HistogramIterator begin() const; + HistogramIterator end() const; + private: template <class TX> void initX(const TX &x); template <class TY> void initY(const TY &y); diff --git a/Framework/HistogramData/inc/MantidHistogramData/HistogramItem.h b/Framework/HistogramData/inc/MantidHistogramData/HistogramItem.h new file mode 100644 index 0000000000000000000000000000000000000000..724d6cde7386b5b411c8bbd2d15cf0b463286cf6 --- /dev/null +++ b/Framework/HistogramData/inc/MantidHistogramData/HistogramItem.h @@ -0,0 +1,187 @@ +#ifndef MANTID_HISTOGRAMDATA_HISTOGRAMITEM_H_ +#define MANTID_HISTOGRAMDATA_HISTOGRAMITEM_H_ + +#include "MantidHistogramData/BinEdges.h" +#include "MantidHistogramData/DllConfig.h" +#include "MantidHistogramData/Histogram.h" +#include "MantidHistogramData/Points.h" + +#include <utility> + +namespace Mantid { +namespace HistogramData { + +/** HistogramItem + + HistogramItem represents a single index in a Histogram object. + + HistogramItem is the type that is returned when iterating over a + Histogram using the foreach loop syntax. HistogramItem provides + efficient access to a single point in the Histogram. + + HistogramItem will only perform conversions between counts and + frequencies or points and bins when explicitly told to. Code that + requires only a few values from a large Histogram may find this faster + than converting the whole X, Y or E value. + + @author Samuel Jackson + @date 2017 + + Copyright © 2016 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. 
+ + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_HISTOGRAMDATA_DLL HistogramItem { + +public: + double center() const { + const auto &x = m_histogram.x(); + if (xModeIsPoints()) { + return x[m_index]; + } else { + return 0.5 * (x[m_index + 1] + x[m_index]); + } + } + + double binWidth() const { + const auto &x = m_histogram.x(); + if (xModeIsPoints()) { + auto numPoints = m_histogram.size(); + if (m_index == 0) { + // first point + return x[1] - x[0]; + } else if (m_index == numPoints - 1) { + // last point + return x[m_index] - x[m_index - 1]; + } else { + // everything inbetween + return 0.5 * (x[m_index + 1] - x[m_index - 1]); + } + } else { + return x[m_index + 1] - x[m_index]; + } + } + + double counts() const { + const auto &y = m_histogram.y(); + if (yModeIsCounts()) { + return y[m_index]; + } else { + return y[m_index] / binWidth(); + } + } + + double countVariance() const { + const auto &e = m_histogram.e(); + if (yModeIsCounts()) { + return e[m_index] * e[m_index]; + } else { + const auto width = binWidth(); + return e[m_index] * e[m_index] * width * width; + } + } + + double countStandardDeviation() const { + const auto &e = m_histogram.e(); + if (yModeIsCounts()) { + return e[m_index]; + } else { + const auto width = binWidth(); + return e[m_index] * width; + } + } + + double frequency() const { + const auto &y = m_histogram.y(); + if (yModeIsCounts()) { + return y[m_index] * binWidth(); + } else { + return y[m_index]; + } + } + + double frequencyVariance() const 
{ + const auto &e = m_histogram.e(); + if (!yModeIsCounts()) { + return e[m_index] * e[m_index]; + } else { + const auto width = binWidth(); + return (e[m_index] * e[m_index]) / (width * width); + } + } + + double frequencyStandardDeviation() const { + const auto &e = m_histogram.e(); + if (!yModeIsCounts()) { + return e[m_index]; + } else { + const auto width = binWidth(); + return e[m_index] / width; + } + } + + void advance(int64_t delta) { + m_index = delta < 0 ? std::max(static_cast<uint64_t>(0), + static_cast<uint64_t>(m_index) + delta) + : std::min(m_histogram.size(), + m_index + static_cast<size_t>(delta)); + } + + void incrementIndex() { + if (m_index < m_histogram.size()) { + ++m_index; + } + } + + void decrementIndex() { + if (m_index > 0) { + --m_index; + } + } + + size_t getIndex() const { return m_index; } + + void setIndex(const size_t index) { m_index = index; } + +private: + friend class HistogramIterator; + + /// Private constructor, can only be created by HistogramIterator + HistogramItem(const Histogram &histogram, const size_t index) + : m_histogram(histogram), m_index(index) {} + + bool xModeIsPoints() const { + return Histogram::XMode::Points == m_histogram.xMode(); + } + + bool yModeIsCounts() const { + return Histogram::YMode::Counts == m_histogram.yMode(); + } + // Deleted assignment operator as a HistogramItem is not copyable + HistogramItem operator=(const HistogramItem &) = delete; + + const Histogram &m_histogram; + size_t m_index; +}; + +} // namespace HistogramData +} // namespace Mantid + +#endif diff --git a/Framework/HistogramData/inc/MantidHistogramData/HistogramIterator.h b/Framework/HistogramData/inc/MantidHistogramData/HistogramIterator.h new file mode 100644 index 0000000000000000000000000000000000000000..550708ef62e8a7c70f3b9b7d515bfd2a700ef998 --- /dev/null +++ b/Framework/HistogramData/inc/MantidHistogramData/HistogramIterator.h @@ -0,0 +1,80 @@ + +#ifndef MANTID_HISTOGRAMDATA_HISTOGRAMITERATOR_H_ +#define 
MANTID_HISTOGRAMDATA_HISTOGRAMITERATOR_H_ + +#include "MantidHistogramData/DllConfig.h" +#include "MantidHistogramData/HistogramItem.h" + +#include <boost/iterator/iterator_facade.hpp> +#include <memory> + +namespace Mantid { +namespace HistogramData { + +class Histogram; + +/** HistogramIterator + + HistogramIterator implements the iterator interface for HistogramData. + At each position the iterator will point to an instance of a HistogramItem. + This item provides direct access to the values at a particular index. + + @author Samuel Jackson + @date 2017 + + Copyright &copy; 2016 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_HISTOGRAMDATA_DLL HistogramIterator + : public boost::iterator_facade<HistogramIterator, const HistogramItem &, + boost::bidirectional_traversal_tag> { + +public: + HistogramIterator(const Histogram &histogram, const size_t index) + : m_item(histogram, index){}; + +private: + friend class boost::iterator_core_access; + + void increment() { m_item.incrementIndex(); } + + bool equal(const HistogramIterator &other) const { + return m_item.getIndex() == other.m_item.getIndex(); + } + + const HistogramItem &dereference() const { return m_item; } + + void decrement() { m_item.decrementIndex(); } + + void advance(int64_t delta) { m_item.advance(delta); } + + uint64_t distance_to(const HistogramIterator &other) const { + return static_cast<uint64_t>(other.m_item.getIndex()) - + static_cast<uint64_t>(m_item.getIndex()); + } + + HistogramItem m_item; +}; + +} // namespace HistogramData +} // namespace Mantid + +#endif diff --git a/Framework/HistogramData/src/Histogram.cpp b/Framework/HistogramData/src/Histogram.cpp index 39c138ecc0be44ab0d2ee170b203ea14d62738da..20c9759d88d8ff0acd5aff7fe8dcf2bf0a955a4b 100644 --- a/Framework/HistogramData/src/Histogram.cpp +++ b/Framework/HistogramData/src/Histogram.cpp @@ -1,4 +1,6 @@ #include "MantidHistogramData/Histogram.h" +#include "MantidHistogramData/HistogramIterator.h" + #include <sstream> namespace Mantid { @@ -283,5 +285,13 @@ void Histogram::resize(size_t n) { } } +HistogramIterator Histogram::begin() const { + return HistogramIterator(*this, 0); +} + +HistogramIterator Histogram::end() const { + return HistogramIterator(*this, size()); +} + } // namespace HistogramData } // namespace Mantid diff --git a/Framework/HistogramData/test/HistogramIteratorTest.h b/Framework/HistogramData/test/HistogramIteratorTest.h new file mode 100644 index 
0000000000000000000000000000000000000000..4a7c707d281ebcc220ee72d31b82453465f5880d --- /dev/null +++ b/Framework/HistogramData/test/HistogramIteratorTest.h @@ -0,0 +1,329 @@ +#ifndef MANTID_HISTOGRAMDATA_HISTOGRAMITERATORTEST_H_ +#define MANTID_HISTOGRAMDATA_HISTOGRAMITERATORTEST_H_ + +#include <cxxtest/TestSuite.h> + +#include "MantidHistogramData/Histogram.h" +#include "MantidHistogramData/HistogramItem.h" +#include "MantidHistogramData/HistogramIterator.h" +#include "MantidHistogramData/LinearGenerator.h" + +using namespace Mantid::HistogramData; + +class HistogramIteratorTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static HistogramIteratorTest *createSuite() { + return new HistogramIteratorTest(); + } + static void destroySuite(HistogramIteratorTest *suite) { delete suite; } + + void test_construction() { + Histogram hist(Histogram::XMode::BinEdges, Histogram::YMode::Counts); + TS_ASSERT_THROWS_NOTHING(HistogramIterator iter(hist, 0)); + } + + void test_iterator_begin() { + Histogram hist(Points{1, 2, 3}, Frequencies{2, 3, 4}); + auto iter = hist.begin(); + TS_ASSERT(iter != hist.end()); + TS_ASSERT_EQUALS(iter->frequency(), 2); + } + + void test_iterator_end() { + Histogram hist(Points{1, 2, 3}, Frequencies{2, 3, 4}); + auto iter = hist.end(); + TS_ASSERT(iter != hist.begin()); + } + + void test_iterator_increment() { + Histogram hist(Points{1, 2, 3}, Frequencies{2, 3, 4}); + auto iter = hist.begin(); + TS_ASSERT(iter != hist.end()); + TS_ASSERT_EQUALS(iter->frequency(), 2); + ++iter; + TS_ASSERT(iter != hist.end()); + TS_ASSERT_EQUALS(iter->frequency(), 3); + ++iter; + TS_ASSERT(iter != hist.end()); + TS_ASSERT_EQUALS(iter->frequency(), 4); + ++iter; + TS_ASSERT(iter == hist.end()); + } + + void test_iterator_decrement() { + Histogram hist(Points{1, 2, 3}, Frequencies{2, 3, 4}); + auto iter = hist.end(); + 
--iter; + TS_ASSERT_DIFFERS(iter, hist.begin()); + TS_ASSERT_EQUALS(iter->frequency(), 4); + --iter; + TS_ASSERT_DIFFERS(iter, hist.begin()); + TS_ASSERT_EQUALS(iter->frequency(), 3); + --iter; + TS_ASSERT_EQUALS(iter, hist.begin()); + TS_ASSERT_EQUALS(iter->frequency(), 2); + } + + void test_iterator_advance() { + Histogram hist(Points{1, 2, 3}, Frequencies{2, 3, 4}); + auto iter = hist.begin(); + + std::advance(iter, 2); + TS_ASSERT_EQUALS(iter->frequency(), 4); + // check past end of valid range + std::advance(iter, 2); + TS_ASSERT_EQUALS(iter, hist.end()); + std::advance(iter, -3); + TS_ASSERT_EQUALS(iter->frequency(), 2); + // check before start of valid range + std::advance(iter, -2); + TS_ASSERT_EQUALS(iter, hist.begin()); + } + + void test_iterator_distance() { + Histogram hist(Points{1, 2, 3}, Frequencies{2, 3, 4}); + auto begin = hist.begin(); + auto end = hist.end(); + + TS_ASSERT_DIFFERS(begin, end); + TS_ASSERT_EQUALS(std::distance(begin, end), 3); + ++begin; + TS_ASSERT_DIFFERS(begin, end); + TS_ASSERT_EQUALS(std::distance(begin, end), 2); + --begin; + std::advance(begin, std::distance(begin, end) / 2); + TS_ASSERT_DIFFERS(begin, end); + TS_ASSERT_EQUALS(std::distance(begin, end), 2); + } + + void test_iterate_over_histogram_counts() { + Counts expectedCounts{2, 3, 4}; + Histogram hist(Points{1, 2, 3}, expectedCounts); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), expectedCounts.begin(), + [](const HistogramItem &item, const double &counts) { + return item.counts() == counts; + })); + } + + void test_iterate_over_histogram_counts_when_histogram_has_frequencies() { + Histogram hist(Points{1, 2, 3}, Frequencies{2, 3, 4}); + Counts expectedCounts = hist.counts(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), expectedCounts.begin(), + [](const HistogramItem &item, const double &counts) { + return item.counts() == counts; + })); + } + + void test_iterate_over_histogram_frequencies() { + Frequencies expectedFrequencies{2, 3, 4}; + Histogram 
hist(Points{1, 2, 3}, expectedFrequencies); + + TS_ASSERT( + std::equal(hist.begin(), hist.end(), expectedFrequencies.begin(), + [](const HistogramItem &item, const double &frequency) { + return item.frequency() == frequency; + })); + } + + void test_iterate_over_histogram_frequencies_when_histogram_has_counts() { + Histogram hist(Points{1, 2, 3}, Counts{2, 3, 4}); + Frequencies expectedFrequencies = hist.frequencies(); + + TS_ASSERT( + std::equal(hist.begin(), hist.end(), expectedFrequencies.begin(), + [](const HistogramItem &item, const double &frequency) { + return item.frequency() == frequency; + })); + } + + void test_iterate_over_histogram_center_when_histogram_has_bins() { + Histogram hist(BinEdges{1, 2, 3, 4}, Counts{2, 3, 4}); + Points expectedPoints = hist.points(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), expectedPoints.begin(), + [](const HistogramItem &item, const double &point) { + return item.center() == point; + })); + } + + void test_iterate_over_histogram_center_when_histogram_has_points() { + Histogram hist(Points{1, 2, 3}, Counts{2, 3, 4}); + Points expectedPoints = hist.points(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), expectedPoints.begin(), + [](const HistogramItem &item, const double &point) { + return item.center() == point; + })); + } + + void test_iterate_over_histogram_width_when_histogram_has_bins() { + Histogram hist(BinEdges{1, 2, 3, 5}, Counts{2, 3, 4}); + std::vector<double> expectedWidths = {1, 1, 2}; + + TS_ASSERT(std::equal(hist.begin(), hist.end(), expectedWidths.begin(), + [](const HistogramItem &item, const double &width) { + return item.binWidth() == width; + })); + } + + void test_iterate_over_histogram_width_when_histogram_has_points() { + Histogram hist(Points{1, 3, 5}, Counts{2, 3, 4}); + std::vector<double> expectedWidths = {2, 2, 2}; + + TS_ASSERT(std::equal(hist.begin(), hist.end(), expectedWidths.begin(), + [](const HistogramItem &item, const double &width) { + return item.binWidth() == width; + 
})); + } + + void test_iterate_over_histogram_count_variances_when_histogram_has_counts() { + Histogram hist(BinEdges{1, 2, 3, 5}, Counts{2, 3, 4}); + auto expectedCountVariances = hist.countVariances(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), + expectedCountVariances.begin(), + [](const HistogramItem &item, const double &variance) { + return item.countVariance() == variance; + })); + } + + void + test_iterate_over_histogram_count_variances_when_histogram_has_frequencies() { + Histogram hist(BinEdges{1, 2, 3, 5}, Frequencies{2, 3, 4}); + auto expectedCountVariances = hist.countVariances(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), + expectedCountVariances.begin(), + [](const HistogramItem &item, const double &variance) { + return item.countVariance() == variance; + })); + } + + void test_iterate_over_histogram_count_std_when_histogram_has_counts() { + Histogram hist(BinEdges{1, 2, 3, 5}, Counts{2, 3, 4}); + auto expectedCountStd = hist.countStandardDeviations(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), expectedCountStd.begin(), + [](const HistogramItem &item, const double &sigma) { + return item.countStandardDeviation() == sigma; + })); + } + + void test_iterate_over_histogram_count_std_when_histogram_has_frequencies() { + Histogram hist(BinEdges{1, 2, 3, 5}, Frequencies{2, 3, 4}); + auto expectedCountStd = hist.countStandardDeviations(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), expectedCountStd.begin(), + [](const HistogramItem &item, const double &sigma) { + return item.countStandardDeviation() == sigma; + })); + } + + void + test_iterate_over_histogram_frequency_variances_when_histogram_has_frequencys() { + Histogram hist(BinEdges{1, 2, 3, 5}, Counts{2, 3, 4}); + auto expectedFrequencyVariances = hist.frequencyVariances(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), + expectedFrequencyVariances.begin(), + [](const HistogramItem &item, const double &variance) { + return item.frequencyVariance() == variance; + })); + 
} + + void + test_iterate_over_histogram_frequency_variances_when_histogram_has_frequencies() { + Histogram hist(BinEdges{1, 2, 3, 5}, Frequencies{2, 3, 4}); + auto expectedFrequencyVariances = hist.frequencyVariances(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), + expectedFrequencyVariances.begin(), + [](const HistogramItem &item, const double &variance) { + return item.frequencyVariance() == variance; + })); + } + + void + test_iterate_over_histogram_frequency_std_when_histogram_has_frequencys() { + Histogram hist(BinEdges{1, 2, 3, 5}, Counts{2, 3, 4}); + auto expectedFrequencyStd = hist.frequencyStandardDeviations(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), expectedFrequencyStd.begin(), + [](const HistogramItem &item, const double &sigma) { + return item.frequencyStandardDeviation() == sigma; + })); + } + + void + test_iterate_over_histogram_frequency_std_when_histogram_has_frequencies() { + Histogram hist(BinEdges{1, 2, 3, 5}, Frequencies{2, 3, 4}); + auto expectedFrequencyStd = hist.frequencyStandardDeviations(); + + TS_ASSERT(std::equal(hist.begin(), hist.end(), expectedFrequencyStd.begin(), + [](const HistogramItem &item, const double &sigma) { + return item.frequencyStandardDeviation() == sigma; + })); + } +}; + +class HistogramIteratorTestPerformance : public CxxTest::TestSuite { +public: + static HistogramIteratorTestPerformance *createSuite() { + return new HistogramIteratorTestPerformance; + } + static void destroySuite(HistogramIteratorTestPerformance *suite) { + delete suite; + } + + HistogramIteratorTestPerformance() + : m_hist(BinEdges(histSize, LinearGenerator(0, 1)), + Counts(histSize - 1, LinearGenerator(0, 1))) {} + + void test_convert_counts_to_frequency_for_each_item() { + double total = 0; + for (size_t i = 0; i < nHists; i++) + for (auto &item : m_hist) + total += item.frequency(); + } + + void test_convert_counts_to_frequency_once_per_histogram() { + double total = 0; + for (size_t i = 0; i < nHists; i++) { + const auto 
&frequencies = m_hist.frequencies(); + for (auto &frequency : frequencies) + total += frequency; + } + } + + void test_convert_counts_to_frequency_for_each_item_sparse() { + double total = 0; + for (size_t i = 0; i < nHists; i++) { + for (auto &item : m_hist) { + if (item.counts() > histSize - 5) + total += item.frequency(); + } + } + } + + void test_convert_counts_to_frequency_once_per_histogram_sparse() { + double total = 0; + for (size_t i = 0; i < nHists; i++) { + const auto &counts = m_hist.counts(); + const auto &frequencies = m_hist.frequencies(); + for (size_t j = 0; j < histSize; ++j) + if (counts[j] > histSize - 5) + total += frequencies[j]; + } + } + +private: + const size_t nHists = 1000; + const size_t histSize = 1000000; + Histogram m_hist; +}; + +#endif /* MANTID_HISTOGRAMDATA_HISTOGRAMITERATORTEST_H_ */ diff --git a/Framework/HistogramData/test/HistogramTest.h b/Framework/HistogramData/test/HistogramTest.h index 8cca766e21a5763b71b01c7437619ae16476a842..86fff631977c2349d692e2de7eca6ad41761aa58 100644 --- a/Framework/HistogramData/test/HistogramTest.h +++ b/Framework/HistogramData/test/HistogramTest.h @@ -4,6 +4,7 @@ #include <cxxtest/TestSuite.h> #include "MantidHistogramData/Histogram.h" +#include "MantidHistogramData/HistogramIterator.h" #include "MantidHistogramData/LinearGenerator.h" using Mantid::HistogramData::Histogram; @@ -1149,6 +1150,15 @@ public: TS_ASSERT(!h.sharedE()); TS_ASSERT(!h.sharedDx()); } + + void test_that_can_iterate_histogram() { + Histogram hist(Points{0.1, 0.2, 0.4}, Counts{1, 2, 4}); + double total = 0; + for (const auto &item : hist) { + total += item.counts(); + } + TS_ASSERT_EQUALS(total, 7) + } }; class HistogramTestPerformance : public CxxTest::TestSuite { diff --git a/Framework/Indexing/CMakeLists.txt b/Framework/Indexing/CMakeLists.txt index e5b69e1a9de4cb53c493f6632026201e14b6fb41..33fc4cf478c8e7f3b4bc0bbc1c926f33bde33f77 100644 --- a/Framework/Indexing/CMakeLists.txt +++ b/Framework/Indexing/CMakeLists.txt @@ -5,6 
+5,7 @@ set ( SRC_FILES src/LegacyConversion.cpp src/Partitioner.cpp src/RoundRobinPartitioner.cpp + src/Scatter.cpp src/SpectrumNumberTranslator.cpp ) @@ -22,6 +23,7 @@ set ( INC_FILES inc/MantidIndexing/PartitionIndex.h inc/MantidIndexing/Partitioner.h inc/MantidIndexing/RoundRobinPartitioner.h + inc/MantidIndexing/Scatter.h inc/MantidIndexing/SpectrumIndexSet.h inc/MantidIndexing/SpectrumNumber.h inc/MantidIndexing/SpectrumNumberTranslator.h @@ -40,6 +42,7 @@ set ( TEST_FILES PartitionIndexTest.h PartitionerTest.h RoundRobinPartitionerTest.h + ScatterTest.h SpectrumIndexSetTest.h SpectrumNumberTest.h SpectrumNumberTranslatorTest.h diff --git a/Framework/Indexing/inc/MantidIndexing/Extract.h b/Framework/Indexing/inc/MantidIndexing/Extract.h index ffb31d730f47dd79d5d1a5898b23463d20cdd289..283b2d1e62d074b1a8102ed42bc6c1a5273059a6 100644 --- a/Framework/Indexing/inc/MantidIndexing/Extract.h +++ b/Framework/Indexing/inc/MantidIndexing/Extract.h @@ -8,6 +8,7 @@ namespace Mantid { namespace Indexing { class IndexInfo; +class SpectrumIndexSet; /** Functions for extracting spectra. A new IndexInfo with the desired spectra is created based on an existing one. 
@@ -37,6 +38,8 @@ class IndexInfo; Code Documentation is available at: <http://doxygen.mantidproject.org> */ MANTID_INDEXING_DLL IndexInfo +extract(const IndexInfo &source, const SpectrumIndexSet &indices); +MANTID_INDEXING_DLL IndexInfo extract(const IndexInfo &source, const std::vector<size_t> &indices); MANTID_INDEXING_DLL IndexInfo extract(const IndexInfo &source, const size_t minIndex, const size_t maxIndex); diff --git a/Framework/Indexing/inc/MantidIndexing/IndexInfo.h b/Framework/Indexing/inc/MantidIndexing/IndexInfo.h index ded7d80947676efd3964bfb915c3ced210242a57..b96a988ca0a99235f8c381acc52fbf0695404f16 100644 --- a/Framework/Indexing/inc/MantidIndexing/IndexInfo.h +++ b/Framework/Indexing/inc/MantidIndexing/IndexInfo.h @@ -83,6 +83,8 @@ public: IndexInfo(std::vector<SpectrumNumber> spectrumNumbers, const Parallel::StorageMode storageMode, const Parallel::Communicator &communicator); + template <class IndexType> + IndexInfo(std::vector<IndexType> indices, const IndexInfo &parent); IndexInfo(const IndexInfo &other); IndexInfo(IndexInfo &&other); @@ -120,7 +122,7 @@ public: bool isOnThisPartition(GlobalSpectrumIndex globalIndex) const; Parallel::StorageMode storageMode() const; - Parallel::Communicator communicator() const; + const Parallel::Communicator &communicator() const; private: void makeSpectrumNumberTranslator( diff --git a/Framework/Indexing/inc/MantidIndexing/IndexSet.h b/Framework/Indexing/inc/MantidIndexing/IndexSet.h index d1030119495fcbc92d8e1db5168572a367a692bc..455c5324aecb9f879e52ea9c4fcdd33ff32618bb 100644 --- a/Framework/Indexing/inc/MantidIndexing/IndexSet.h +++ b/Framework/Indexing/inc/MantidIndexing/IndexSet.h @@ -140,15 +140,18 @@ IndexSet<T>::IndexSet(int64_t min, int64_t max, size_t fullRange) { } /// Constructor for a set containing all specified indices. Range is verified at -/// construction time and duplicates are removed. +/// construction time and duplicates cause an error. 
template <class T> IndexSet<T>::IndexSet(const std::vector<size_t> &indices, size_t fullRange) : m_isRange(false) { - // We use a set to create unique and ordered indices. - std::set<size_t> index_set(indices.cbegin(), indices.cend()); - if (!index_set.empty() && *(index_set.rbegin()) >= fullRange) + // Validate indices, using m_indices as buffer (reassigned later). + m_indices = indices; + std::sort(m_indices.begin(), m_indices.end()); + if (!m_indices.empty() && *m_indices.rbegin() >= fullRange) throw std::out_of_range("IndexSet: specified index is out of range"); - m_indices = std::vector<size_t>(index_set.begin(), index_set.end()); + if (std::adjacent_find(m_indices.begin(), m_indices.end()) != m_indices.end()) + throw std::runtime_error("IndexSet: duplicate indices are not allowed"); + m_indices = indices; m_size = m_indices.size(); } diff --git a/Framework/Indexing/inc/MantidIndexing/Scatter.h b/Framework/Indexing/inc/MantidIndexing/Scatter.h new file mode 100644 index 0000000000000000000000000000000000000000..2560e41b19310396859de26b9955ba17902b9f32 --- /dev/null +++ b/Framework/Indexing/inc/MantidIndexing/Scatter.h @@ -0,0 +1,42 @@ +#ifndef MANTID_INDEXING_SCATTER_H_ +#define MANTID_INDEXING_SCATTER_H_ + +#include "MantidIndexing/DllConfig.h" + +namespace Mantid { +namespace Indexing { +class IndexInfo; + +/** Scattering for IndexInfo, in particular changing its storage mode to + Parallel::StorageMode::Distributed. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. 
+ + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +MANTID_INDEXING_DLL IndexInfo scatter(const IndexInfo &indexInfo); + +} // namespace Indexing +} // namespace Mantid + +#endif /* MANTID_INDEXING_SCATTER_H_ */ diff --git a/Framework/Indexing/inc/MantidIndexing/SpectrumNumberTranslator.h b/Framework/Indexing/inc/MantidIndexing/SpectrumNumberTranslator.h index a4ca36f1d938dab45058024b3cb3a5a5f672ebae..eadb19a261b6b5dc35c945233fc71594786fbb96 100644 --- a/Framework/Indexing/inc/MantidIndexing/SpectrumNumberTranslator.h +++ b/Framework/Indexing/inc/MantidIndexing/SpectrumNumberTranslator.h @@ -50,6 +50,11 @@ public: SpectrumNumberTranslator(const std::vector<SpectrumNumber> &spectrumNumbers, std::unique_ptr<Partitioner> partitioner, const PartitionIndex &partition); + SpectrumNumberTranslator(const std::vector<SpectrumNumber> &spectrumNumbers, + const SpectrumNumberTranslator &parent); + SpectrumNumberTranslator( + const std::vector<GlobalSpectrumIndex> &globalIndices, + const SpectrumNumberTranslator &parent); size_t globalSize() const; size_t localSize() const; @@ -70,6 +75,8 @@ private: void checkUniqueSpectrumNumbers() const; // Not thread-safe! Use only in combination with std::call_once! 
void setupSpectrumNumberToIndexMap() const; + std::vector<SpectrumNumber> + spectrumNumbers(const std::vector<GlobalSpectrumIndex> &globalIndices) const; struct SpectrumNumberHash { std::size_t operator()(const SpectrumNumber &spectrumNumber) const { diff --git a/Framework/Indexing/src/Extract.cpp b/Framework/Indexing/src/Extract.cpp index ecda1ff29d68db83d3f1c6ff5246672d3e2bf6b5..a1d0ce74590cc6ea969a66e217cae6b7ab95864e 100644 --- a/Framework/Indexing/src/Extract.cpp +++ b/Framework/Indexing/src/Extract.cpp @@ -1,13 +1,30 @@ #include "MantidIndexing/Extract.h" #include "MantidIndexing/IndexInfo.h" +#include "MantidIndexing/SpectrumIndexSet.h" #include "MantidTypes/SpectrumDefinition.h" namespace Mantid { namespace Indexing { +namespace { +void checkStorageMode(const IndexInfo &indexInfo) { + using namespace Parallel; + if (indexInfo.storageMode() == StorageMode::Distributed) + throw std::runtime_error("extract() does not support " + + Parallel::toString(StorageMode::Distributed)); +} +} + +/// Extracts IndexInfo from source IndexInfo, extracting data for all indices +/// specified by index set. +IndexInfo extract(const IndexInfo &source, const SpectrumIndexSet &indices) { + return extract(source, std::vector<size_t>(indices.begin(), indices.end())); +} + /// Extracts IndexInfo from source IndexInfo, extracting data for all indices /// specified by vector. IndexInfo extract(const IndexInfo &source, const std::vector<size_t> &indices) { + checkStorageMode(source); std::vector<SpectrumNumber> specNums; std::vector<SpectrumDefinition> specDefs; const auto &sourceDefs = source.spectrumDefinitions(); @@ -24,6 +41,7 @@ IndexInfo extract(const IndexInfo &source, const std::vector<size_t> &indices) { /// specified by range. 
IndexInfo extract(const IndexInfo &source, const size_t minIndex, const size_t maxIndex) { + checkStorageMode(source); std::vector<SpectrumNumber> specNums; std::vector<SpectrumDefinition> specDefs; const auto &sourceDefs = source.spectrumDefinitions(); diff --git a/Framework/Indexing/src/IndexInfo.cpp b/Framework/Indexing/src/IndexInfo.cpp index 0f27f32d743d749a200521fbba012f08cbba6cee..fa151a5a233a27f7a9f80b84caf6a92c184d72f2 100644 --- a/Framework/Indexing/src/IndexInfo.cpp +++ b/Framework/Indexing/src/IndexInfo.cpp @@ -14,13 +14,13 @@ namespace Mantid { namespace Indexing { /// Construct a default IndexInfo, with contiguous spectrum numbers starting at -/// 1 and no detector IDs. +/// 1 and no spectrum definitions. IndexInfo::IndexInfo(const size_t globalSize, const Parallel::StorageMode storageMode) : IndexInfo(globalSize, storageMode, Parallel::Communicator{}) {} /// Construct a default IndexInfo, with contiguous spectrum numbers starting at -/// 1 and no detector IDs. +/// 1 and no spectrum definitions. IndexInfo::IndexInfo(const size_t globalSize, const Parallel::StorageMode storageMode, const Parallel::Communicator &communicator) @@ -33,15 +33,15 @@ IndexInfo::IndexInfo(const size_t globalSize, makeSpectrumNumberTranslator(std::move(specNums)); } -/// Construct with given spectrum number and vector of detector IDs for each -/// index. +/// Construct with given spectrum number for each index and no spectrum +/// definitions. IndexInfo::IndexInfo(std::vector<SpectrumNumber> spectrumNumbers, const Parallel::StorageMode storageMode) : IndexInfo(std::move(spectrumNumbers), storageMode, Parallel::Communicator{}) {} -/// Construct with given spectrum number and vector of detector IDs for each -/// index. +/// Construct with given spectrum number for each index and no spectrum +/// definitions. 
IndexInfo::IndexInfo(std::vector<SpectrumNumber> spectrumNumbers, const Parallel::StorageMode storageMode, const Parallel::Communicator &communicator) @@ -51,6 +51,30 @@ IndexInfo::IndexInfo(std::vector<SpectrumNumber> spectrumNumbers, makeSpectrumNumberTranslator(std::move(spectrumNumbers)); } +/** Construct with given index subset of parent. + * + * The template argument IndexType can be SpectrumNumber or GlobalSpectrumIndex. + * The parent defines the partitioning of the spectrum numbers, i.e., the + * partition assigned to a given spectrum number in the constructed IndexInfo is + * given by the partition that spectrum number has in parent. This is used to + * extract spectrum numbers while maintaining the partitioning, avoiding the + * need to redistribute data between partitions (MPI ranks). Throws if any of + * the spectrum numbers is not present in parent. */ +template <class IndexType> +IndexInfo::IndexInfo(std::vector<IndexType> indices, const IndexInfo &parent) + : m_storageMode(parent.m_storageMode), + m_communicator( + Kernel::make_unique<Parallel::Communicator>(*parent.m_communicator)) { + if (const auto parentSpectrumDefinitions = parent.spectrumDefinitions()) { + m_spectrumDefinitions = Kernel::make_cow<std::vector<SpectrumDefinition>>(); + auto &specDefs = m_spectrumDefinitions.access(); + for (const auto i : parent.makeIndexSet(indices)) + specDefs.push_back(parentSpectrumDefinitions->operator[](i)); + } + m_spectrumNumberTranslator = Kernel::make_cow<SpectrumNumberTranslator>( + std::move(indices), *parent.m_spectrumNumberTranslator); +} + IndexInfo::IndexInfo(const IndexInfo &other) : m_storageMode(other.m_storageMode), m_communicator( @@ -254,7 +278,7 @@ bool IndexInfo::isOnThisPartition(GlobalSpectrumIndex globalIndex) const { Parallel::StorageMode IndexInfo::storageMode() const { return m_storageMode; } /// Returns the communicator used in MPI runs. 
-Parallel::Communicator IndexInfo::communicator() const { +const Parallel::Communicator &IndexInfo::communicator() const { return *m_communicator; } @@ -286,5 +310,10 @@ void IndexInfo::makeSpectrumNumberTranslator( std::move(spectrumNumbers), std::move(partitioner), partition); } +template MANTID_INDEXING_DLL IndexInfo::IndexInfo(std::vector<SpectrumNumber>, + const IndexInfo &); +template MANTID_INDEXING_DLL +IndexInfo::IndexInfo(std::vector<GlobalSpectrumIndex>, const IndexInfo &); + } // namespace Indexing } // namespace Mantid diff --git a/Framework/Indexing/src/Scatter.cpp b/Framework/Indexing/src/Scatter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9d0d41af439aec0500bf8c87567025661f5a552f --- /dev/null +++ b/Framework/Indexing/src/Scatter.cpp @@ -0,0 +1,37 @@ +#include "MantidIndexing/IndexInfo.h" +#include "MantidIndexing/GlobalSpectrumIndex.h" +#include "MantidIndexing/Scatter.h" +#include "MantidIndexing/SpectrumNumber.h" +#include "MantidParallel/Communicator.h" +#include "MantidTypes/SpectrumDefinition.h" + +namespace Mantid { +namespace Indexing { + +/// Returns a scattered copy of `indexInfo` with storage mode `Distributed`. 
+IndexInfo scatter(const Indexing::IndexInfo &indexInfo) { + using namespace Parallel; + if (indexInfo.communicator().size() == 1 || + indexInfo.storageMode() == Parallel::StorageMode::Distributed) + return indexInfo; + if (indexInfo.storageMode() == Parallel::StorageMode::MasterOnly) + throw std::runtime_error( + "Cannot scatter IndexInfo with unsupported storage mode " + + toString(StorageMode::MasterOnly)); + + std::vector<SpectrumNumber> spectrumNumbers; + for (size_t i = 0; i < indexInfo.size(); ++i) + spectrumNumbers.push_back(indexInfo.spectrumNumber(i)); + IndexInfo scattered(spectrumNumbers, Parallel::StorageMode::Distributed, + indexInfo.communicator()); + const auto &globalSpectrumDefinitions = indexInfo.spectrumDefinitions(); + std::vector<SpectrumDefinition> spectrumDefinitions; + for (size_t i = 0; i < indexInfo.size(); ++i) + if (scattered.isOnThisPartition(static_cast<GlobalSpectrumIndex>(i))) + spectrumDefinitions.emplace_back((*globalSpectrumDefinitions)[i]); + scattered.setSpectrumDefinitions(spectrumDefinitions); + return scattered; +} + +} // namespace Indexing +} // namespace Mantid diff --git a/Framework/Indexing/src/SpectrumNumberTranslator.cpp b/Framework/Indexing/src/SpectrumNumberTranslator.cpp index ac613909860e3df63d815d1d40beb5dbe8704ec9..60ad3ac1e61fe4af82ee61de5d3931cf04dab72c 100644 --- a/Framework/Indexing/src/SpectrumNumberTranslator.cpp +++ b/Framework/Indexing/src/SpectrumNumberTranslator.cpp @@ -63,6 +63,31 @@ SpectrumNumberTranslator::SpectrumNumberTranslator( // called. 
} +SpectrumNumberTranslator::SpectrumNumberTranslator( + const std::vector<SpectrumNumber> &spectrumNumbers, + const SpectrumNumberTranslator &parent) + : m_isPartitioned(parent.m_isPartitioned), m_partition(parent.m_partition), + m_globalSpectrumNumbers(spectrumNumbers) { + size_t currentIndex = 0; + for (size_t i = 0; i < m_globalSpectrumNumbers.size(); ++i) { + auto partition = parent.m_spectrumNumberToPartition.at(spectrumNumbers[i]); + auto number = m_globalSpectrumNumbers[i]; + m_spectrumNumberToPartition.emplace(number, partition); + if (partition == m_partition) { + m_spectrumNumberToIndex.emplace_back(number, currentIndex); + m_globalToLocal.emplace_back(GlobalSpectrumIndex(i), currentIndex); + if (m_isPartitioned) + m_spectrumNumbers.emplace_back(number); + ++currentIndex; + } + } +} + +SpectrumNumberTranslator::SpectrumNumberTranslator( + const std::vector<GlobalSpectrumIndex> &globalIndices, + const SpectrumNumberTranslator &parent) + : SpectrumNumberTranslator(parent.spectrumNumbers(globalIndices), parent) {} + SpectrumIndexSet SpectrumNumberTranslator::makeIndexSet(SpectrumNumber min, SpectrumNumber max) const { @@ -144,5 +169,14 @@ void SpectrumNumberTranslator::setupSpectrumNumberToIndexMap() const { -> bool { return std::get<0>(a) < std::get<0>(b); }); } +std::vector<SpectrumNumber> SpectrumNumberTranslator::spectrumNumbers( + const std::vector<GlobalSpectrumIndex> &globalIndices) const { + std::vector<SpectrumNumber> spectrumNumbers; + for (const auto index : globalIndices) + spectrumNumbers.push_back( + m_globalSpectrumNumbers[static_cast<size_t>(index)]); + return spectrumNumbers; +} + } // namespace Indexing } // namespace Mantid diff --git a/Framework/Indexing/test/ExtractTest.h b/Framework/Indexing/test/ExtractTest.h index 14bf99b3041abde6bd0d62de040c705cc6c906ed..79286a2bd07aad5222c650b65f08461cac4964f9 100644 --- a/Framework/Indexing/test/ExtractTest.h +++ b/Framework/Indexing/test/ExtractTest.h @@ -5,6 +5,7 @@ #include 
"MantidIndexing/Extract.h" #include "MantidIndexing/IndexInfo.h" +#include "MantidIndexing/SpectrumIndexSet.h" #include "MantidTypes/SpectrumDefinition.h" using namespace Mantid; @@ -33,6 +34,23 @@ public: TS_ASSERT_EQUALS((*result.spectrumDefinitions())[1], specDefs[2]); } + void test_extract_SpectrumIndexSet() { + IndexInfo source({1, 2, 3}); + std::vector<SpectrumDefinition> specDefs(3); + specDefs[0].add(10); + specDefs[1].add(20); + specDefs[2].add(30); + source.setSpectrumDefinitions(specDefs); + std::vector<SpectrumNumber> indices{{1, 3}}; + const auto indexSet = source.makeIndexSet(indices); + auto result = extract(source, indexSet); + TS_ASSERT_EQUALS(result.size(), 2); + TS_ASSERT_EQUALS(result.spectrumNumber(0), 1); + TS_ASSERT_EQUALS(result.spectrumNumber(1), 3); + TS_ASSERT_EQUALS((*result.spectrumDefinitions())[0], specDefs[0]); + TS_ASSERT_EQUALS((*result.spectrumDefinitions())[1], specDefs[2]); + } + void test_reorder() { IndexInfo source({1, 2, 3}); std::vector<SpectrumDefinition> specDefs(3); diff --git a/Framework/Indexing/test/IndexInfoTest.h b/Framework/Indexing/test/IndexInfoTest.h index 7e16ef641f317550f9d78c9cb4fc31629f3c6ae8..08ef126fbbaa73d5b5910230647584a2ba12d8b9 100644 --- a/Framework/Indexing/test/IndexInfoTest.h +++ b/Framework/Indexing/test/IndexInfoTest.h @@ -74,6 +74,26 @@ void run_isOnThisPartition_StorageMode_Distributed( } } } + +void run_construct_from_parent_StorageMode_Distributed( + const Parallel::Communicator &comm) { + IndexInfo parent(47, Parallel::StorageMode::Distributed, comm); + IndexInfo i(std::vector<GlobalSpectrumIndex>{10, 11, 12, 13, 14, 15, 16}, + parent); + size_t expectedSize = 0; + // Rank in `i` is given by rank in parent, so we iterate parent! 
+ for (size_t globalIndex = 0; globalIndex < parent.globalSize(); + ++globalIndex) { + if (static_cast<int>(globalIndex) % comm.size() == comm.rank()) { + if (globalIndex >= 10 && globalIndex <= 16) { + TS_ASSERT_EQUALS(i.spectrumNumber(expectedSize), + static_cast<int>(globalIndex) + 1); + ++expectedSize; + } + } + } + TS_ASSERT_EQUALS(i.size(), expectedSize); +} } class IndexInfoTest : public CxxTest::TestSuite { @@ -104,6 +124,46 @@ public: TS_ASSERT_EQUALS(info.spectrumNumber(2), 1); } + void test_construct_from_parent_reorder() { + IndexInfo parent({3, 2, 1}); + std::vector<SpectrumDefinition> specDefs(3); + specDefs[0].add(6); + specDefs[0].add(7); + specDefs[2].add(8); + parent.setSpectrumDefinitions(specDefs); + + IndexInfo i(std::vector<SpectrumNumber>{2, 1, 3}, parent); + + TS_ASSERT_EQUALS(i.size(), 3); + TS_ASSERT_EQUALS(i.globalSize(), 3); + TS_ASSERT_EQUALS(i.spectrumNumber(0), 2); + TS_ASSERT_EQUALS(i.spectrumNumber(1), 1); + TS_ASSERT_EQUALS(i.spectrumNumber(2), 3); + TS_ASSERT(i.spectrumDefinitions()); + TS_ASSERT_EQUALS((*i.spectrumDefinitions())[0], specDefs[1]); + TS_ASSERT_EQUALS((*i.spectrumDefinitions())[1], specDefs[2]); + TS_ASSERT_EQUALS((*i.spectrumDefinitions())[2], specDefs[0]); + } + + void test_construct_from_parent_filter() { + IndexInfo parent({3, 2, 1}); + std::vector<SpectrumDefinition> specDefs(3); + specDefs[0].add(6); + specDefs[0].add(7); + specDefs[2].add(8); + parent.setSpectrumDefinitions(specDefs); + + IndexInfo i(std::vector<SpectrumNumber>{1, 2}, parent); + + TS_ASSERT_EQUALS(i.size(), 2); + TS_ASSERT_EQUALS(i.globalSize(), 2); + TS_ASSERT_EQUALS(i.spectrumNumber(0), 1); + TS_ASSERT_EQUALS(i.spectrumNumber(1), 2); + TS_ASSERT(i.spectrumDefinitions()); + TS_ASSERT_EQUALS((*i.spectrumDefinitions())[0], specDefs[2]); + TS_ASSERT_EQUALS((*i.spectrumDefinitions())[1], specDefs[1]); + } + void test_size() { TS_ASSERT_EQUALS(IndexInfo(3).size(), 3); } void test_copy() { @@ -301,6 +361,10 @@ public: 
run_isOnThisPartition_StorageMode_Distributed(Parallel::Communicator{}); } + void test_construct_from_parent_StorageMode_Distributed() { + runParallel(run_construct_from_parent_StorageMode_Distributed); + } + private: #ifdef MPI_EXPERIMENTAL boost::mpi::environment m_environment; diff --git a/Framework/Indexing/test/IndexSetTest.h b/Framework/Indexing/test/IndexSetTest.h index bb74b45d535d65e42663e673300ac9895cbaeeb1..461bc224edea4482bf09df74528f3dbfdfdbab06 100644 --- a/Framework/Indexing/test/IndexSetTest.h +++ b/Framework/Indexing/test/IndexSetTest.h @@ -84,12 +84,18 @@ public: TS_ASSERT_EQUALS(set[1], 2); } - void test_indexList() { - // Note duplicate index - IndexSetTester set({2, 1, 2}, 3); - TS_ASSERT_EQUALS(set.size(), 2); - TS_ASSERT_EQUALS(set[0], 1); - TS_ASSERT_EQUALS(set[1], 2); + void test_indexList_order_preserved() { + IndexSetTester set({2, 1, 3}, 4); + TS_ASSERT_EQUALS(set.size(), 3); + TS_ASSERT_EQUALS(set[0], 2); + TS_ASSERT_EQUALS(set[1], 1); + TS_ASSERT_EQUALS(set[2], 3); + } + + void test_indexList_duplicate_throws() { + TS_ASSERT_THROWS_EQUALS(IndexSetTester({2, 1, 2}, 3), + const std::runtime_error &e, std::string(e.what()), + "IndexSet: duplicate indices are not allowed"); } void test_iterator_basics() { diff --git a/Framework/Indexing/test/ScatterTest.h b/Framework/Indexing/test/ScatterTest.h new file mode 100644 index 0000000000000000000000000000000000000000..f52b973f03aeb34f430fd421962e05dc47a8d00e --- /dev/null +++ b/Framework/Indexing/test/ScatterTest.h @@ -0,0 +1,89 @@ +#ifndef MANTID_INDEXING_SCATTERTEST_H_ +#define MANTID_INDEXING_SCATTERTEST_H_ + +#include <cxxtest/TestSuite.h> + +#include "MantidIndexing/IndexInfo.h" +#include "MantidIndexing/Scatter.h" +#include "MantidIndexing/SpectrumIndexSet.h" +#include "MantidParallel/Communicator.h" +#include "MantidTypes/SpectrumDefinition.h" + +#include "MantidTestHelpers/ParallelRunner.h" + +using namespace Mantid; +using namespace Mantid::Indexing; +using namespace Mantid::Parallel; 
+using namespace ParallelTestHelpers; + +namespace { +IndexInfo makeIndexInfo(const Communicator &comm = Communicator()) { + IndexInfo indexInfo(7, StorageMode::Cloned, comm); + std::vector<SpectrumDefinition> specDefs; + specDefs.emplace_back(1); + specDefs.emplace_back(2); + specDefs.emplace_back(4); + specDefs.emplace_back(8); + specDefs.emplace_back(); + specDefs.emplace_back(); + specDefs.emplace_back(); + indexInfo.setSpectrumDefinitions(std::move(specDefs)); + return indexInfo; +} + +void run_StorageMode_Cloned(const Communicator &comm) { + const auto indexInfo = makeIndexInfo(comm); + if (comm.size() == 1) { + TS_ASSERT_THROWS_NOTHING(scatter(indexInfo)); + } else { + const auto result = scatter(indexInfo); + TS_ASSERT_EQUALS(result.storageMode(), StorageMode::Distributed); + TS_ASSERT_EQUALS(result.globalSize(), indexInfo.size()); + // Assuming round-robin partitioning + TS_ASSERT_EQUALS(result.size(), + (indexInfo.size() + comm.size() - 1 - comm.rank()) / + comm.size()); + const auto resultSpecDefs = result.spectrumDefinitions(); + const auto specDefs = indexInfo.spectrumDefinitions(); + const auto indices = result.makeIndexSet(); + size_t current = 0; + for (size_t i = 0; i < specDefs->size(); ++i) { + if (static_cast<int>(i) % comm.size() == comm.rank()) { + TS_ASSERT_EQUALS(result.spectrumNumber(indices[current]), + indexInfo.spectrumNumber(i)); + TS_ASSERT_EQUALS(resultSpecDefs->at(indices[current]), specDefs->at(i)); + ++current; + } + } + } +} +} + +class ScatterTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static ScatterTest *createSuite() { return new ScatterTest(); } + static void destroySuite(ScatterTest *suite) { delete suite; } + + void test_1_rank() { + const auto indexInfo = makeIndexInfo(); + const auto result = scatter(indexInfo); + TS_ASSERT_EQUALS(result.storageMode(), 
StorageMode::Cloned); + TS_ASSERT_EQUALS(result.globalSize(), indexInfo.size()); + TS_ASSERT_EQUALS(result.size(), indexInfo.size()); + TS_ASSERT_EQUALS(result.spectrumDefinitions(), + indexInfo.spectrumDefinitions()); + TS_ASSERT_EQUALS(result.spectrumNumber(0), indexInfo.spectrumNumber(0)); + TS_ASSERT_EQUALS(result.spectrumNumber(1), indexInfo.spectrumNumber(1)); + TS_ASSERT_EQUALS(result.spectrumNumber(2), indexInfo.spectrumNumber(2)); + TS_ASSERT_EQUALS(result.spectrumNumber(3), indexInfo.spectrumNumber(3)); + TS_ASSERT_EQUALS(result.spectrumNumber(4), indexInfo.spectrumNumber(4)); + TS_ASSERT_EQUALS(result.spectrumNumber(5), indexInfo.spectrumNumber(5)); + TS_ASSERT_EQUALS(result.spectrumNumber(6), indexInfo.spectrumNumber(6)); + } + + void test_StorageMode_Cloned() { runParallel(run_StorageMode_Cloned); } +}; + +#endif /* MANTID_INDEXING_SCATTERTEST_H_ */ diff --git a/Framework/Indexing/test/SpectrumNumberTranslatorTest.h b/Framework/Indexing/test/SpectrumNumberTranslatorTest.h index 0f988d382ba4522ac67f99cb2b2b225c6d6b495d..476c07d7ffed9d45f90401c83496d7231ca55b9a 100644 --- a/Framework/Indexing/test/SpectrumNumberTranslatorTest.h +++ b/Framework/Indexing/test/SpectrumNumberTranslatorTest.h @@ -64,6 +64,16 @@ public: PartitionIndex(0))); } + void test_construct_empty() { + std::vector<SpectrumNumber> spectrumNumbers; + TS_ASSERT_THROWS_NOTHING(SpectrumNumberTranslator( + spectrumNumbers, Kernel::make_unique<RoundRobinPartitioner>( + 1, PartitionIndex(0), + Partitioner::MonitorStrategy::CloneOnEachPartition, + std::vector<GlobalSpectrumIndex>{}), + PartitionIndex(0))); + } + void test_construct_bad_spectrum_numbers() { auto numbers = {1, 2, 3, 3}; std::vector<SpectrumNumber> spectrumNumbers(numbers.begin(), numbers.end()); @@ -76,6 +86,58 @@ public: PartitionIndex(0))); } + void test_construct_parent() { + auto numbers = {1, 2, 3, 4}; + std::vector<SpectrumNumber> spectrumNumbers(numbers.begin(), numbers.end()); + SpectrumNumberTranslator parent( + 
spectrumNumbers, Kernel::make_unique<RoundRobinPartitioner>( + 1, PartitionIndex(0), + Partitioner::MonitorStrategy::CloneOnEachPartition, + std::vector<GlobalSpectrumIndex>{}), + PartitionIndex(0)); + + TS_ASSERT_THROWS_NOTHING(SpectrumNumberTranslator(spectrumNumbers, parent)); + spectrumNumbers.erase(spectrumNumbers.begin() + 1); + TS_ASSERT_THROWS_NOTHING(SpectrumNumberTranslator(spectrumNumbers, parent)); + spectrumNumbers.erase(spectrumNumbers.begin()); + TS_ASSERT_THROWS_NOTHING(SpectrumNumberTranslator(spectrumNumbers, parent)); + spectrumNumbers.erase(spectrumNumbers.begin()); + TS_ASSERT_THROWS_NOTHING(SpectrumNumberTranslator(spectrumNumbers, parent)); + spectrumNumbers.erase(spectrumNumbers.begin()); + TS_ASSERT_THROWS_NOTHING(SpectrumNumberTranslator(spectrumNumbers, parent)); + } + + void test_construct_parent_reorder() { + auto numbers = {1, 2, 3, 4}; + std::vector<SpectrumNumber> spectrumNumbers(numbers.begin(), numbers.end()); + SpectrumNumberTranslator parent( + spectrumNumbers, Kernel::make_unique<RoundRobinPartitioner>( + 1, PartitionIndex(0), + Partitioner::MonitorStrategy::CloneOnEachPartition, + std::vector<GlobalSpectrumIndex>{}), + PartitionIndex(0)); + + std::iter_swap(spectrumNumbers.begin(), spectrumNumbers.end() - 1); + SpectrumNumberTranslator reordered(spectrumNumbers, parent); + TS_ASSERT_EQUALS(reordered.spectrumNumber(0), 4); + TS_ASSERT_EQUALS(reordered.spectrumNumber(3), 1); + } + + void test_construct_parent_bad_spectrum_numbers() { + auto numbers = {1, 2, 3, 4}; + std::vector<SpectrumNumber> spectrumNumbers(numbers.begin(), numbers.end()); + SpectrumNumberTranslator parent( + spectrumNumbers, Kernel::make_unique<RoundRobinPartitioner>( + 1, PartitionIndex(0), + Partitioner::MonitorStrategy::CloneOnEachPartition, + std::vector<GlobalSpectrumIndex>{}), + PartitionIndex(0)); + + spectrumNumbers[1] = 7; // 7 is not in parent. 
+ TS_ASSERT_THROWS(SpectrumNumberTranslator(spectrumNumbers, parent), + std::out_of_range); + } + void test_access_bad_spectrum_numbers() { auto numbers = {1, 2, 3, 3}; std::vector<SpectrumNumber> spectrumNumbers(numbers.begin(), numbers.end()); @@ -182,8 +244,9 @@ public: auto translator = makeTranslator(1, 0); auto set = translator->makeIndexSet(SpectrumNumber(1), SpectrumNumber(5)); TS_ASSERT_EQUALS(set.size(), 4); - TS_ASSERT_EQUALS(set[0], 0); - TS_ASSERT_EQUALS(set[1], 1); + // IndexSet is ordered by spectrum number. + TS_ASSERT_EQUALS(set[0], 1); + TS_ASSERT_EQUALS(set[1], 0); TS_ASSERT_EQUALS(set[2], 2); TS_ASSERT_EQUALS(set[3], 3); } @@ -269,8 +332,9 @@ public: auto translator = makeTranslator(1, 0); auto set1 = translator->makeIndexSet(makeSpectrumNumbers({1, 2})); TS_ASSERT_EQUALS(set1.size(), 2); - TS_ASSERT_EQUALS(set1[0], 0); - TS_ASSERT_EQUALS(set1[1], 1); + // Order of spectrum numbers preserved. + TS_ASSERT_EQUALS(set1[0], 1); + TS_ASSERT_EQUALS(set1[1], 0); auto set2 = translator->makeIndexSet(makeSpectrumNumbers({4, 5})); TS_ASSERT_EQUALS(set2.size(), 2); TS_ASSERT_EQUALS(set2[0], 2); @@ -332,6 +396,31 @@ public: TS_ASSERT_EQUALS(set2.size(), 1); TS_ASSERT_EQUALS(set2[0], 1); } + + void test_construct_parent_3_ranks() { + auto parent = makeTranslator(3, 0); + auto numbers = {2, 1, 4, 5}; + std::vector<SpectrumNumber> spectrumNumbers(numbers.begin(), numbers.end()); + + SpectrumNumberTranslator translator1(spectrumNumbers, *parent); + TS_ASSERT_EQUALS(translator1.globalSize(), 4); + TS_ASSERT_EQUALS(translator1.localSize(), 2); // 2 and 5 are on this rank. 
+ + spectrumNumbers.erase(spectrumNumbers.begin()); + SpectrumNumberTranslator translator2(spectrumNumbers, *parent); + TS_ASSERT_EQUALS(translator2.globalSize(), 3); + TS_ASSERT_EQUALS(translator2.localSize(), 1); + + spectrumNumbers.erase(spectrumNumbers.begin()); + SpectrumNumberTranslator translator3(spectrumNumbers, *parent); + TS_ASSERT_EQUALS(translator3.globalSize(), 2); + TS_ASSERT_EQUALS(translator3.localSize(), 1); + + spectrumNumbers.erase(spectrumNumbers.end() - 1); + SpectrumNumberTranslator translator4(spectrumNumbers, *parent); + TS_ASSERT_EQUALS(translator4.globalSize(), 1); + TS_ASSERT_EQUALS(translator4.localSize(), 0); + } }; #endif /* MANTID_INDEXING_SPECTRUMNUMBERTRANSLATORTEST_H_ */ diff --git a/Framework/Parallel/CMakeLists.txt b/Framework/Parallel/CMakeLists.txt index a40bb488af84f33f16a237231c35ce5dfcb4eac9..ff63c095bde7ef1df28fdc466aa1e900922ad482 100644 --- a/Framework/Parallel/CMakeLists.txt +++ b/Framework/Parallel/CMakeLists.txt @@ -1,6 +1,9 @@ set ( SRC_FILES src/Communicator.cpp src/ExecutionMode.cpp + src/IO/Chunker.cpp + src/IO/EventLoader.cpp + src/IO/EventParser.cpp src/Request.cpp src/StorageMode.cpp src/ThreadingBackend.cpp @@ -10,16 +13,31 @@ set ( INC_FILES inc/MantidParallel/Collectives.h inc/MantidParallel/Communicator.h inc/MantidParallel/ExecutionMode.h + inc/MantidParallel/IO/Chunker.h + inc/MantidParallel/IO/EventDataPartitioner.h + inc/MantidParallel/IO/EventLoader.h + inc/MantidParallel/IO/EventLoaderHelpers.h + inc/MantidParallel/IO/EventParser.h + inc/MantidParallel/IO/NXEventDataLoader.h + inc/MantidParallel/IO/NXEventDataSource.h + inc/MantidParallel/IO/PulseTimeGenerator.h + inc/MantidParallel/Nonblocking.h inc/MantidParallel/Request.h inc/MantidParallel/StorageMode.h inc/MantidParallel/ThreadingBackend.h ) set ( TEST_FILES + ChunkerTest.h CollectivesTest.h CommunicatorTest.h + EventDataPartitionerTest.h + EventLoaderTest.h + EventParserTest.h ExecutionModeTest.h + NonblockingTest.h ParallelRunnerTest.h + 
PulseTimeGeneratorTest.h RequestTest.h StorageModeTest.h ThreadingBackendTest.h @@ -44,8 +62,9 @@ endif () # Add to the 'Framework' group in VS set_property ( TARGET Parallel PROPERTY FOLDER "MantidFramework" ) +target_include_directories ( Parallel SYSTEM PRIVATE ${HDF5_INCLUDE_DIRS} ) target_link_libraries ( Parallel LINK_PRIVATE ${TCMALLOC_LIBRARIES_LINKTIME} - ${GSL_LIBRARIES} ${MANTIDLIBS} ) + ${GSL_LIBRARIES} ${MANTIDLIBS} ${HDF5_LIBRARIES} ) # Add the unit tests directory add_subdirectory ( test ) diff --git a/Framework/Parallel/inc/MantidParallel/Collectives.h b/Framework/Parallel/inc/MantidParallel/Collectives.h index 1dcf4aa350f08d190d210dcaf364bf9f15758096..05f3d1f5f073417f1dfbdbeef5c762936b58a829 100644 --- a/Framework/Parallel/inc/MantidParallel/Collectives.h +++ b/Framework/Parallel/inc/MantidParallel/Collectives.h @@ -3,6 +3,7 @@ #include "MantidParallel/Communicator.h" #include "MantidParallel/DllConfig.h" +#include "MantidParallel/Nonblocking.h" #ifdef MPI_EXPERIMENTAL #include <boost/mpi/collectives.hpp> @@ -68,6 +69,19 @@ void gather(const Communicator &comm, const T &in_value, int root) { "Parallel::gather on root rank without output argument."); } } + +template <typename T> +void all_to_all(const Communicator &comm, const std::vector<T> &in_values, + std::vector<T> &out_values) { + int tag{0}; + out_values.resize(comm.size()); + std::vector<Request> requests; + for (int rank = 0; rank < comm.size(); ++rank) + requests.emplace_back(comm.irecv(rank, tag, out_values[rank])); + for (int rank = 0; rank < comm.size(); ++rank) + comm.send(rank, tag, in_values[rank]); + wait_all(requests.begin(), requests.end()); +} } template <typename... T> void gather(const Communicator &comm, T &&... args) { @@ -78,6 +92,15 @@ template <typename... T> void gather(const Communicator &comm, T &&... args) { detail::gather(comm, std::forward<T>(args)...); } +template <typename... T> +void all_to_all(const Communicator &comm, T &&... 
args) { +#ifdef MPI_EXPERIMENTAL + if (!comm.hasBackend()) + return boost::mpi::all_to_all(comm, std::forward<T>(args)...); +#endif + detail::all_to_all(comm, std::forward<T>(args)...); +} + } // namespace Parallel } // namespace Mantid diff --git a/Framework/Parallel/inc/MantidParallel/IO/Chunker.h b/Framework/Parallel/inc/MantidParallel/IO/Chunker.h new file mode 100644 index 0000000000000000000000000000000000000000..077e885a1bf50b386e9399989ff05d8525eaaeb9 --- /dev/null +++ b/Framework/Parallel/inc/MantidParallel/IO/Chunker.h @@ -0,0 +1,69 @@ +#ifndef MANTID_PARALLEL_CHUNKER_H_ +#define MANTID_PARALLEL_CHUNKER_H_ + +#include <vector> + +#include "MantidParallel/DllConfig.h" + +namespace Mantid { +namespace Parallel { +class Communicator; +namespace IO { + +/** Chunking class for Parallel::IO::EventLoader. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_PARALLEL_DLL Chunker { +public: + struct LoadRange { + size_t bankIndex; + size_t eventOffset; + size_t eventCount; + }; + Chunker(const int numWorkers, const int worker, + const std::vector<size_t> &bankSizes, const size_t chunkSize); + + size_t chunkSize() const; + + std::vector<std::vector<int>> makeWorkerGroups() const; + std::vector<LoadRange> makeLoadRanges() const; + + static std::vector<std::pair<int, std::vector<size_t>>> + makeBalancedPartitioning(const int workers, const std::vector<size_t> &sizes); + +private: + const int m_worker; + const size_t m_chunkSize; + std::vector<size_t> m_bankSizes; + std::vector<size_t> m_chunkCounts; + std::vector<std::pair<int, std::vector<size_t>>> m_partitioning; +}; + +} // namespace IO +} // namespace Parallel +} // namespace Mantid + +#endif /* MANTID_PARALLEL_CHUNKER_H_ */ diff --git a/Framework/Parallel/inc/MantidParallel/IO/EventDataPartitioner.h b/Framework/Parallel/inc/MantidParallel/IO/EventDataPartitioner.h new file mode 100644 index 0000000000000000000000000000000000000000..f92ecef7299d53270726e7442c61f095d02a36a9 --- /dev/null +++ b/Framework/Parallel/inc/MantidParallel/IO/EventDataPartitioner.h @@ -0,0 +1,118 @@ +#ifndef MANTID_PARALLEL_EVENTDATAPARTITIONER_H_ +#define MANTID_PARALLEL_EVENTDATAPARTITIONER_H_ + +#include "MantidParallel/DllConfig.h" +#include "MantidParallel/IO/Chunker.h" +#include "MantidParallel/IO/PulseTimeGenerator.h" +#include "MantidTypes/Core/DateAndTime.h" + +namespace Mantid { +namespace Parallel { +namespace IO { + +/** Partition the event_time_offset and event_id entries and combine them with + pulse time information obtained from PulseTimeGenerator. Partitioning is to + obtain a separate vector of events for each rank in an MPI run of Mantid, + i.e., each event_id is assigned to a specific MPI rank. 
Currently a + round-robin partitioning scheme is hard-coded. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +namespace detail { +template <class TimeOffsetType> struct Event { + int32_t index; // local spectrum index + TimeOffsetType tof; + Types::Core::DateAndTime pulseTime; +}; +} + +template <class TimeOffsetType> class AbstractEventDataPartitioner { +public: + using Event = detail::Event<TimeOffsetType>; + AbstractEventDataPartitioner(const int numWorkers) + : m_numWorkers(numWorkers) {} + virtual ~AbstractEventDataPartitioner() = default; + + /** Partition given data. 
+ * + * @param partitioned output vector of data for each partition + * @param globalSpectrumIndex list of spectrum indices + * @param eventTimeOffset list TOF values, same length as globalSpectrumIndex + * @param range defines start and end of data for lookup in PulseTimeGenerator + */ + virtual void partition(std::vector<std::vector<Event>> &partitioned, + const int32_t *globalSpectrumIndex, + const TimeOffsetType *eventTimeOffset, + const Chunker::LoadRange &range) = 0; + +protected: + const int m_numWorkers; +}; + +template <class IndexType, class TimeZeroType, class TimeOffsetType> +class EventDataPartitioner + : public AbstractEventDataPartitioner<TimeOffsetType> { +public: + using Event = detail::Event<TimeOffsetType>; + EventDataPartitioner(const int numWorkers, + PulseTimeGenerator<IndexType, TimeZeroType> &&gen) + : AbstractEventDataPartitioner<TimeOffsetType>(numWorkers), + m_pulseTimes(std::move(gen)) {} + + void partition(std::vector<std::vector<Event>> &partitioned, + const int32_t *globalSpectrumIndex, + const TimeOffsetType *eventTimeOffset, + const Chunker::LoadRange &range) override; + +private: + PulseTimeGenerator<IndexType, TimeZeroType> m_pulseTimes; +}; + +template <class IndexType, class TimeZeroType, class TimeOffsetType> +void EventDataPartitioner<IndexType, TimeZeroType, TimeOffsetType>::partition( + std::vector<std::vector<Event>> &partitioned, + const int32_t *globalSpectrumIndex, const TimeOffsetType *eventTimeOffset, + const Chunker::LoadRange &range) { + for (auto &item : partitioned) + item.clear(); + const auto workers = + AbstractEventDataPartitioner<TimeOffsetType>::m_numWorkers; + partitioned.resize(workers); + + m_pulseTimes.seek(range.eventOffset); + for (size_t event = 0; event < range.eventCount; ++event) { + // Currently this supports only a hard-coded round-robin partitioning. 
+ int partition = globalSpectrumIndex[event] % workers; + auto index = globalSpectrumIndex[event] / workers; + partitioned[partition].emplace_back(detail::Event<TimeOffsetType>{ + index, eventTimeOffset[event], m_pulseTimes.next()}); + } +} + +} // namespace IO +} // namespace Parallel +} // namespace Mantid + +#endif /* MANTID_PARALLEL_EVENTDATAPARTITIONER_H_ */ diff --git a/Framework/Parallel/inc/MantidParallel/IO/EventLoader.h b/Framework/Parallel/inc/MantidParallel/IO/EventLoader.h new file mode 100644 index 0000000000000000000000000000000000000000..1007cfcc50011a2270572868fdeafaf95f0b8548 --- /dev/null +++ b/Framework/Parallel/inc/MantidParallel/IO/EventLoader.h @@ -0,0 +1,63 @@ +#ifndef MANTID_PARALLEL_EVENTLOADER_H_ +#define MANTID_PARALLEL_EVENTLOADER_H_ + +#include <string> +#include <unordered_map> +#include <vector> + +#include "MantidParallel/DllConfig.h" + +namespace Mantid { +namespace Types { +namespace Event { +class TofEvent; +} +} +namespace Parallel { +class Communicator; +namespace IO { + +/** Loader for event data from Nexus files with parallelism based on multiple + processes (MPI) for performance. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +namespace EventLoader { +MANTID_PARALLEL_DLL std::unordered_map<int32_t, size_t> +makeAnyEventIdToBankMap(const std::string &filename, + const std::string &groupName, + const std::vector<std::string> &bankNames); +MANTID_PARALLEL_DLL void +load(const Communicator &communicator, const std::string &filename, + const std::string &groupName, const std::vector<std::string> &bankNames, + const std::vector<int32_t> &bankOffsets, + std::vector<std::vector<Types::Event::TofEvent> *> eventLists); +} + +} // namespace IO +} // namespace Parallel +} // namespace Mantid + +#endif /* MANTID_PARALLEL_EVENTLOADER_H_ */ diff --git a/Framework/Parallel/inc/MantidParallel/IO/EventLoaderHelpers.h b/Framework/Parallel/inc/MantidParallel/IO/EventLoaderHelpers.h new file mode 100644 index 0000000000000000000000000000000000000000..bf02c371c9373e1a7cfd953ccc1b0820a6f90e43 --- /dev/null +++ b/Framework/Parallel/inc/MantidParallel/IO/EventLoaderHelpers.h @@ -0,0 +1,138 @@ +#ifndef MANTID_PARALLEL_EVENTLOADERHELPERS_H_ +#define MANTID_PARALLEL_EVENTLOADERHELPERS_H_ + +#include "MantidKernel/make_unique.h" +#include "MantidParallel/Communicator.h" +#include "MantidParallel/DllConfig.h" +#include "MantidParallel/IO/Chunker.h" +#include "MantidParallel/IO/EventParser.h" +#include "MantidParallel/IO/NXEventDataLoader.h" +#include "MantidParallel/IO/PulseTimeGenerator.h" + +namespace Mantid { +namespace Parallel { +namespace IO { + +/** Private parts of EventLoader. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. 
+ + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +namespace EventLoader { +/// Read number of events in given banks from file. +std::vector<size_t> readBankSizes(const H5::Group &group, + const std::vector<std::string> &bankNames) { + std::vector<size_t> bankSizes; + for (const auto &bankName : bankNames) { + const H5::DataSet dataset = group.openDataSet(bankName + "/event_id"); + const H5::DataSpace dataSpace = dataset.getSpace(); + bankSizes.push_back(dataSpace.getSelectNpoints()); + } + return bankSizes; +} + +H5::DataType readDataType(const H5::Group &group, + const std::vector<std::string> &bankNames, + const std::string &name) { + return group.openDataSet(bankNames.front() + "/" + name).getDataType(); +} + +template <class TimeOffsetType> +void load(const Chunker &chunker, NXEventDataSource<TimeOffsetType> &dataSource, + EventParser<TimeOffsetType> &dataSink) { + const size_t chunkSize = chunker.chunkSize(); + const auto &ranges = chunker.makeLoadRanges(); + std::vector<int32_t> event_id(2 * chunkSize); + std::vector<TimeOffsetType> event_time_offset(2 * chunkSize); + + int64_t previousBank = -1; + size_t bufferOffset{0}; + for (const auto &range : ranges) { + std::unique_ptr<AbstractEventDataPartitioner<TimeOffsetType>> partitioner; + if 
(static_cast<int64_t>(range.bankIndex) != previousBank) { + partitioner = dataSource.setBankIndex(range.bankIndex); + } + dataSource.readEventID(event_id.data() + bufferOffset, range.eventOffset, + range.eventCount); + dataSource.readEventTimeOffset(event_time_offset.data() + bufferOffset, + range.eventOffset, range.eventCount); + if (previousBank != -1) + dataSink.wait(); + if (static_cast<int64_t>(range.bankIndex) != previousBank) { + dataSink.setEventDataPartitioner(std::move(partitioner)); + dataSink.setEventTimeOffsetUnit(dataSource.readEventTimeOffsetUnit()); + previousBank = range.bankIndex; + } + dataSink.startAsync(event_id.data() + bufferOffset, + event_time_offset.data() + bufferOffset, range); + bufferOffset = (bufferOffset + chunkSize) % (2 * chunkSize); + } + dataSink.wait(); +} + +template <class TimeOffsetType> +void load(const Communicator &comm, const H5::Group &group, + const std::vector<std::string> &bankNames, + const std::vector<int32_t> &bankOffsets, + std::vector<std::vector<Types::Event::TofEvent> *> eventLists) { + // In tests loading from a single SSD this chunk size seems close to the + // optimum. May need to be adjusted in the future (potentially dynamically) + // when loading from parallel file systems and running on a cluster. + const size_t chunkSize = 1024 * 1024; + // In tests loading from a single SSD there was no advantage using fewer + // processes for loading than for processing. This may be different in larger + // MPI runs on a cluster where limiting the number of IO processes may be + // required when accessing the parallel file system. 
+ const Chunker chunker(comm.size(), comm.rank(), + readBankSizes(group, bankNames), chunkSize); + NXEventDataLoader<TimeOffsetType> loader(comm.size(), group, bankNames); + EventParser<TimeOffsetType> consumer(comm, chunker.makeWorkerGroups(), + bankOffsets, eventLists); + load<TimeOffsetType>(chunker, loader, consumer); +} + +/// Translate from H5::DataType to actual type, forward to load implementation. +template <class... T> void load(const H5::DataType &type, T &&... args) { + if (type == H5::PredType::NATIVE_INT32) + return load<int32_t>(args...); + if (type == H5::PredType::NATIVE_INT64) + return load<int64_t>(args...); + if (type == H5::PredType::NATIVE_UINT32) + return load<uint32_t>(args...); + if (type == H5::PredType::NATIVE_UINT64) + return load<uint64_t>(args...); + if (type == H5::PredType::NATIVE_FLOAT) + return load<float>(args...); + if (type == H5::PredType::NATIVE_DOUBLE) + return load<double>(args...); + throw std::runtime_error( + "Unsupported H5::DataType for event_time_offset in NXevent_data"); +} +} + +} // namespace IO +} // namespace Parallel +} // namespace Mantid + +#endif /* MANTID_PARALLEL_EVENTLOADERHELPERS_H_ */ diff --git a/Framework/Parallel/inc/MantidParallel/IO/EventParser.h b/Framework/Parallel/inc/MantidParallel/IO/EventParser.h new file mode 100644 index 0000000000000000000000000000000000000000..32cc6635317c48fc6fa7877c70a616a7a5c01822 --- /dev/null +++ b/Framework/Parallel/inc/MantidParallel/IO/EventParser.h @@ -0,0 +1,264 @@ +#ifndef MANTID_PARALLEL_IO_EVENT_PARSER_H +#define MANTID_PARALLEL_IO_EVENT_PARSER_H + +#include "MantidParallel/Collectives.h" +#include "MantidParallel/Communicator.h" +#include "MantidParallel/Nonblocking.h" +#include "MantidParallel/DllConfig.h" +#include "MantidParallel/IO/Chunker.h" +#include "MantidParallel/IO/EventDataPartitioner.h" +#include "MantidTypes/Event/TofEvent.h" + +#include <chrono> +#include <cstdint> +#include <numeric> +#include <thread> +#include <vector> +#include <xmmintrin.h> 
+ +using Mantid::Types::Event::TofEvent; + +namespace Mantid { +namespace Parallel { +namespace IO { + +/** Distributed (MPI) parsing of Nexus events from a data stream. Data is +distributed accross MPI ranks for writing to event lists on the correct target +rank. + +@author Lamar Moore +@date 2017 + +Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge +National Laboratory & European Spallation Source + +This file is part of Mantid. + +Mantid is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3 of the License, or +(at your option) any later version. + +Mantid is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +File change history is stored at: <https://github.com/mantidproject/mantid> +Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +namespace detail { +void MANTID_PARALLEL_DLL eventIdToGlobalSpectrumIndex(int32_t *event_id_start, + size_t count, + const int32_t bankOffset); +} + +template <class TimeOffsetType> class EventParser { +public: + using Event = detail::Event<TimeOffsetType>; + EventParser(const Communicator &comm, + std::vector<std::vector<int>> rankGroups, + std::vector<int32_t> bankOffsets, + std::vector<std::vector<Types::Event::TofEvent> *> eventLists); + + void setEventDataPartitioner(std::unique_ptr< + AbstractEventDataPartitioner<TimeOffsetType>> partitioner); + void setEventTimeOffsetUnit(const std::string &unit); + + void startAsync(int32_t *event_id_start, + const TimeOffsetType *event_time_offset_start, + const Chunker::LoadRange &range); + + void wait(); + +private: + void doParsing(int32_t *event_id_start, + const TimeOffsetType *event_time_offset_start, + const Chunker::LoadRange &range); + + void redistributeDataMPI(); + void populateEventLists(); + + // Default to 0 such that failure to set unit is easily detected. + double m_timeOffsetScale{0.0}; + Communicator m_comm; + std::vector<std::vector<int>> m_rankGroups; + std::vector<int32_t> m_bankOffsets; + std::vector<std::vector<Types::Event::TofEvent> *> m_eventLists; + std::unique_ptr<AbstractEventDataPartitioner<TimeOffsetType>> m_partitioner; + std::vector<std::vector<Event>> m_allRankData; + std::vector<Event> m_thisRankData; + std::thread m_thread; +}; + +/** Constructor for EventParser. + * + * @param rankGroups rank grouping for banks which determines how work is + * partitioned. The EventParser guarantees to process data obtained from ranks + * in the same group in-order to ensure pulse time ordering. + * @param bankOffsets used to convert from event ID to global spectrum index. + * This assumes that all event IDs within a bank a contiguous. 
+ * @param eventLists workspace event lists which will be populated by the + * parser. The parser assumes that there always is a matching event list for any + * event ID that will be passed in via `startAsync`. + * @param globalToLocalSpectrumIndex lookup table which converts a global + * spectrum index to a spectrum index local to a given mpi rank + */ +template <class TimeOffsetType> +EventParser<TimeOffsetType>::EventParser( + const Communicator &comm, std::vector<std::vector<int>> rankGroups, + std::vector<int32_t> bankOffsets, + std::vector<std::vector<TofEvent> *> eventLists) + : m_comm(comm), m_rankGroups(std::move(rankGroups)), + m_bankOffsets(std::move(bankOffsets)), + m_eventLists(std::move(eventLists)) {} + +/// Set the EventDataPartitioner to use for parsing subsequent events. +template <class TimeOffsetType> +void EventParser<TimeOffsetType>::setEventDataPartitioner( + std::unique_ptr<AbstractEventDataPartitioner<TimeOffsetType>> partitioner) { + // We hold (and use) the PulseTimeGenerator via a virtual base class to avoid + // the need of having IndexType and TimeZeroType as templates for the whole + // class. + m_partitioner = std::move(partitioner); +} + +/** Set the unit of the values in `event_time_offset`. + * + * The unit is used to initialize a scale factor needed for conversion of + * time-of-flight to microseconds, the unit used by TofEvent. 
*/ +template <class TimeOffsetType> +void EventParser<TimeOffsetType>::setEventTimeOffsetUnit( + const std::string &unit) { + constexpr char second[] = "second"; + constexpr char microsecond[] = "microsecond"; + constexpr char nanosecond[] = "nanosecond"; + + if (unit == second) { + m_timeOffsetScale = 1e6; + return; + } + if (unit == microsecond) { + m_timeOffsetScale = 1.0; + return; + } + if (unit == nanosecond) { + m_timeOffsetScale = 1e-3; + return; + } + throw std::runtime_error("EventParser: unsupported unit `" + unit + + "` for event_time_offset"); +} + +/// Convert m_allRankData into m_thisRankData by means of redistribution via +/// MPI. +template <class TimeOffsetType> +void EventParser<TimeOffsetType>::redistributeDataMPI() { + if (m_comm.size() == 1) { + m_thisRankData = m_allRankData.front(); + return; + } + + std::vector<int> sizes(m_allRankData.size()); + std::transform(m_allRankData.cbegin(), m_allRankData.cend(), sizes.begin(), + [](const std::vector<Event> &vec) { + return static_cast<int>(vec.size()); + }); + std::vector<int> recv_sizes(m_allRankData.size()); + Parallel::all_to_all(m_comm, sizes, recv_sizes); + + auto total_size = std::accumulate(recv_sizes.begin(), recv_sizes.end(), 0); + m_thisRankData.resize(total_size); + size_t offset = 0; + std::vector<Parallel::Request> recv_requests; + for (int rank = 0; rank < m_comm.size(); ++rank) { + if (recv_sizes[rank] == 0) + continue; + int tag = 0; + auto buffer = reinterpret_cast<char *>(m_thisRankData.data() + offset); + int size = recv_sizes[rank] * static_cast<int>(sizeof(Event)); + recv_requests.emplace_back(m_comm.irecv(rank, tag, buffer, size)); + offset += recv_sizes[rank]; + } + + std::vector<Parallel::Request> send_requests; + for (int rank = 0; rank < m_comm.size(); ++rank) { + const auto &vec = m_allRankData[rank]; + if (vec.size() == 0) + continue; + int tag = 0; + send_requests.emplace_back( + m_comm.isend(rank, tag, reinterpret_cast<const char *>(vec.data()), + 
static_cast<int>(vec.size() * sizeof(Event)))); + } + + Parallel::wait_all(send_requests.begin(), send_requests.end()); + Parallel::wait_all(recv_requests.begin(), recv_requests.end()); +} + +/// Append events in m_thisRankData to m_eventLists. +template <class TimeOffsetType> +void EventParser<TimeOffsetType>::populateEventLists() { + for (const auto &event : m_thisRankData) { + m_eventLists[event.index]->emplace_back( + m_timeOffsetScale * static_cast<double>(event.tof), event.pulseTime); + // In general `index` is random so this loop suffers from frequent cache + // misses (probably because the hardware prefetchers cannot keep up with the + // number of different memory locations that are getting accessed). We + // manually prefetch into L2 cache to reduce the amount of misses. + _mm_prefetch( + reinterpret_cast<char *>(&m_eventLists[event.index]->back() + 1), + _MM_HINT_T1); + } +} + +/** Accepts raw data from file which has been pre-treated and sorted into chunks + * for parsing. The parser extracts event data from the provided buffers, + * separates then according to MPI ranks and then appends them to the workspace + * event list. Asynchronously starts parsing wait() must be called before + * attempting to invoke this method subsequently. + * @param event_id_start Buffer containing event IDs. + * @param event_time_offset_start Buffer containing TOD. + * @param range contains information on the detector bank which corresponds to + * the data in the buffers, the file index offset where data starts and the + * number of elements in the data array. 
+ */ +template <class TimeOffsetType> +void EventParser<TimeOffsetType>::startAsync( + int32_t *event_id_start, const TimeOffsetType *event_time_offset_start, + const Chunker::LoadRange &range) { + // Wrapped in lambda because std::thread is unable to specialize doParsing on + // its own + m_thread = + std::thread([this, event_id_start, event_time_offset_start, &range] { + doParsing(event_id_start, event_time_offset_start, range); + }); +} + +template <class TimeOffsetType> +void EventParser<TimeOffsetType>::doParsing( + int32_t *event_id_start, const TimeOffsetType *event_time_offset_start, + const Chunker::LoadRange &range) { + // change event_id_start in place + detail::eventIdToGlobalSpectrumIndex(event_id_start, range.eventCount, + m_bankOffsets[range.bankIndex]); + + // event_id_start now contains globalSpectrumIndex + m_partitioner->partition(m_allRankData, event_id_start, + event_time_offset_start, range); + + redistributeDataMPI(); + populateEventLists(); +} + +template <class TimeOffsetType> void EventParser<TimeOffsetType>::wait() { + m_thread.join(); +} + +} // namespace IO +} // namespace Parallel +} // namespace Mantid +#endif // MANTID_PARALLEL_IO_EVENT_PARSER_H diff --git a/Framework/Parallel/inc/MantidParallel/IO/NXEventDataLoader.h b/Framework/Parallel/inc/MantidParallel/IO/NXEventDataLoader.h new file mode 100644 index 0000000000000000000000000000000000000000..40e429b2532a3e90bc14b7fe2f3d0ed9fe6a125c --- /dev/null +++ b/Framework/Parallel/inc/MantidParallel/IO/NXEventDataLoader.h @@ -0,0 +1,242 @@ +#ifndef MANTID_PARALLEL_IO_NXEVENTDATALOADER_H_ +#define MANTID_PARALLEL_IO_NXEVENTDATALOADER_H_ + +#include <vector> +#include <H5Cpp.h> + +#include "MantidParallel/DllConfig.h" +#include "MantidParallel/IO/NXEventDataSource.h" +#include "MantidParallel/IO/PulseTimeGenerator.h" +#include "MantidTypes/Core/DateAndTime.h" +#include "MantidKernel/make_unique.h" + +namespace Mantid { +namespace Parallel { +namespace IO { + +/** NXEventDataLoader is used 
to load entries from the the Nexus NXevent_data + group, in particular event_index, event_time_zero, event_id, and + event_time_offset. The class is templated such that the types of + event_index, event_time_zero, and event_time_offset can be set as required. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +template <class TimeOffsetType> +class NXEventDataLoader : public NXEventDataSource<TimeOffsetType> { +public: + NXEventDataLoader(const int numWorkers, const H5::Group &group, + std::vector<std::string> bankNames); + + std::unique_ptr<AbstractEventDataPartitioner<TimeOffsetType>> + setBankIndex(const size_t bank) override; + + void readEventID(int32_t *event_id, size_t start, + size_t count) const override; + void readEventTimeOffset(TimeOffsetType *event_time_offset, size_t start, + size_t count) const override; + std::string readEventTimeOffsetUnit() const override; + +private: + const int m_numWorkers; + const H5::Group m_root; + H5::Group m_group; + const std::vector<std::string> m_bankNames; + H5::DataSet m_id; + H5::DataSet m_time_offset; +}; + +namespace detail { +/// Read complete data set from group and return the contents as a vector. +template <class T> +std::vector<T> read(const H5::Group &group, const std::string &dataSetName) { + H5::DataSet dataSet = group.openDataSet(dataSetName); + H5::DataType dataType = dataSet.getDataType(); + H5::DataSpace dataSpace = dataSet.getSpace(); + std::vector<T> result; + result.resize(dataSpace.getSelectNpoints()); + dataSet.read(result.data(), dataType); + return result; +} + +/** Read subset of data set and write the result into buffer. + * + * The subset is given by a start index and a count. 
*/ +template <class T> +void read(T *buffer, const H5::DataSet &dataSet, size_t start, size_t count) { + auto hstart = static_cast<hsize_t>(start); + auto hcount = static_cast<hsize_t>(count); + H5::DataType dataType = dataSet.getDataType(); + H5::DataSpace dataSpace = dataSet.getSpace(); + if ((static_cast<int64_t>(dataSpace.getSelectNpoints()) - + static_cast<int64_t>(hstart)) <= 0) + throw std::out_of_range("Start index is beyond end of file"); + if (hcount > dataSpace.getSelectNpoints() - hstart) + throw std::out_of_range("End index is beyond end of file"); + dataSpace.selectHyperslab(H5S_SELECT_SET, &hcount, &hstart); + H5::DataSpace memSpace(1, &hcount); + dataSet.read(buffer, dataType, memSpace, dataSpace); +} + +/** Read subset of data set from group and write the result into buffer. + * + * The subset is given by a start index and a count. */ +template <class T> +void read(T *buffer, const H5::Group &group, const std::string &dataSetName, + size_t start, size_t count) { + H5::DataSet dataSet = group.openDataSet(dataSetName); + read(buffer, dataSet, start, count); +} + +std::string readAttribute(const H5::DataSet &dataSet, + const std::string &attributeName) { + const auto &attr = dataSet.openAttribute(attributeName); + std::string value; + attr.read(attr.getDataType(), value); + return value; +} + +template <class TimeOffsetType, class IndexType, class TimeZeroType> +std::unique_ptr<AbstractEventDataPartitioner<TimeOffsetType>> +makeEventDataPartitioner(const H5::Group &group, const int numWorkers) { + const auto timeZero = group.openDataSet("event_time_zero"); + int64_t time_zero_offset{0}; + // libhdf5 on Ubuntu 14.04 is too old to support timeZero.attrExists("offset") + // and Attribute::readAttribute, check and read manually. 
+ hid_t attr_id = H5Aopen(timeZero.getId(), "offset", H5P_DEFAULT); + if (attr_id > 0) { + const H5::Attribute attr(attr_id); + std::string offset; + attr.read(attr.getDataType(), offset); + time_zero_offset = Types::Core::DateAndTime(offset).totalNanoseconds(); + } + return Kernel::make_unique< + EventDataPartitioner<IndexType, TimeZeroType, TimeOffsetType>>( + numWorkers, PulseTimeGenerator<IndexType, TimeZeroType>{ + read<IndexType>(group, "event_index"), + read<TimeZeroType>(group, "event_time_zero"), + readAttribute(timeZero, "units"), time_zero_offset}); +} + +template <class R, class... T1, class... T2> +std::unique_ptr<AbstractEventDataPartitioner<R>> +makeEventDataPartitioner(const H5::DataType &type, T2 &&... args); + +template <class R, class... T1> struct ConditionalFloat { + template <class... T2> + static std::unique_ptr<AbstractEventDataPartitioner<R>> + forward(const H5::DataType &type, T2 &&... args) { + if (type == H5::PredType::NATIVE_FLOAT) + return makeEventDataPartitioner<R, T1..., float>(args...); + if (type == H5::PredType::NATIVE_DOUBLE) + return makeEventDataPartitioner<R, T1..., double>(args...); + throw std::runtime_error( + "Unsupported H5::DataType for entry in NXevent_data"); + } +}; + +// Specialization for empty T1, i.e., first type argument `event_index`, which +// must be integer. +template <class R> struct ConditionalFloat<R> { + template <class... T2> + static std::unique_ptr<AbstractEventDataPartitioner<R>> + forward(const H5::DataType &, T2 &&...) { + throw std::runtime_error("Unsupported H5::DataType for event_index in " + "NXevent_data, must be integer"); + } +}; + +template <class R, class... T1, class... T2> +std::unique_ptr<AbstractEventDataPartitioner<R>> +makeEventDataPartitioner(const H5::DataType &type, T2 &&... args) { + // Translate from H5::DataType to actual type. Done step by step to avoid + // combinatoric explosion. The T1 parameter pack holds the final template + // arguments we want. 
The T2 parameter pack represents the remaining + // H5::DataType arguments and any other arguments. In every call we peel off + // the first entry from the T2 pack and append it to T1. This stops once the + // next argument in args is not of type H5::DataType anymore, allowing us to + // pass arbitrary extra arguments in the second part of args. + if (type == H5::PredType::NATIVE_INT32) + return makeEventDataPartitioner<R, T1..., int32_t>(args...); + if (type == H5::PredType::NATIVE_INT64) + return makeEventDataPartitioner<R, T1..., int64_t>(args...); + if (type == H5::PredType::NATIVE_UINT32) + return makeEventDataPartitioner<R, T1..., uint32_t>(args...); + if (type == H5::PredType::NATIVE_UINT64) + return makeEventDataPartitioner<R, T1..., uint64_t>(args...); + // Compile-time branching for float types. + return ConditionalFloat<R, T1...>::forward(type, args...); +} +} + +/** Constructor from group and bank names in group to load from. + * + * Template TimeOffsetType -> type used for reading event_time_offset */ +template <class TimeOffsetType> +NXEventDataLoader<TimeOffsetType>::NXEventDataLoader( + const int numWorkers, const H5::Group &group, + std::vector<std::string> bankNames) + : m_numWorkers(numWorkers), m_root(group), + m_bankNames(std::move(bankNames)) {} + +/// Set the bank index and return a EventDataPartitioner for that bank. +template <class TimeOffsetType> +std::unique_ptr<AbstractEventDataPartitioner<TimeOffsetType>> +NXEventDataLoader<TimeOffsetType>::setBankIndex(const size_t bank) { + m_group = m_root.openGroup(m_bankNames[bank]); + m_id = m_group.openDataSet("event_id"); + m_time_offset = m_group.openDataSet("event_time_offset"); + return detail::makeEventDataPartitioner<TimeOffsetType>( + m_group.openDataSet("event_index").getDataType(), + m_group.openDataSet("event_time_zero").getDataType(), m_group, + m_numWorkers); +} + +/// Read subset given by start and count from event_id and write it into buffer. 
+template <class TimeOffsetType> +void NXEventDataLoader<TimeOffsetType>::readEventID(int32_t *buffer, + size_t start, + size_t count) const { + detail::read(buffer, m_id, start, count); +} + +/// Read subset given by start and count from event_time_offset and write it +/// into buffer. +template <class TimeOffsetType> +void NXEventDataLoader<TimeOffsetType>::readEventTimeOffset( + TimeOffsetType *buffer, size_t start, size_t count) const { + detail::read(buffer, m_time_offset, start, count); +} + +/// Read and return the `units` attribute from event_time_offset. +template <class TimeOffsetType> +std::string NXEventDataLoader<TimeOffsetType>::readEventTimeOffsetUnit() const { + return detail::readAttribute(m_time_offset, "units"); +} + +} // namespace IO +} // namespace Parallel +} // namespace Mantid + +#endif /* MANTID_PARALLEL_IO_NXEVENTDATALOADER_H_ */ diff --git a/Framework/Parallel/inc/MantidParallel/IO/NXEventDataSource.h b/Framework/Parallel/inc/MantidParallel/IO/NXEventDataSource.h new file mode 100644 index 0000000000000000000000000000000000000000..17382e79ef01c6f7cb0183de5bc63b6d836b57c5 --- /dev/null +++ b/Framework/Parallel/inc/MantidParallel/IO/NXEventDataSource.h @@ -0,0 +1,59 @@ +#ifndef MANTID_PARALLEL_NXEVENTDATASOURCE_H_ +#define MANTID_PARALLEL_NXEVENTDATASOURCE_H_ + +#include <vector> + +#include "MantidParallel/DllConfig.h" +#include "MantidParallel/IO/EventDataPartitioner.h" + +namespace Mantid { +namespace Parallel { +namespace IO { + +/** Abstract base class for sources of NXevent_data. For files this is + subclassed in NXEventDataLoader. The base class exists for testing purposes + and potentially for supporting event streams in the future. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. 
+ + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +template <class TimeOffsetType> class NXEventDataSource { +public: + virtual ~NXEventDataSource() = default; + + virtual std::unique_ptr<AbstractEventDataPartitioner<TimeOffsetType>> + setBankIndex(const size_t bank) = 0; + + virtual void readEventID(int32_t *event_id, size_t start, + size_t count) const = 0; + virtual void readEventTimeOffset(TimeOffsetType *event_time_offset, + size_t start, size_t count) const = 0; + virtual std::string readEventTimeOffsetUnit() const = 0; +}; + +} // namespace IO +} // namespace Parallel +} // namespace Mantid + +#endif /* MANTID_PARALLEL_NXEVENTDATASOURCE_H_ */ diff --git a/Framework/Parallel/inc/MantidParallel/IO/PulseTimeGenerator.h b/Framework/Parallel/inc/MantidParallel/IO/PulseTimeGenerator.h new file mode 100644 index 0000000000000000000000000000000000000000..83227dee4fd31816c5dec106f7ad4abaf458e94c --- /dev/null +++ b/Framework/Parallel/inc/MantidParallel/IO/PulseTimeGenerator.h @@ -0,0 +1,141 @@ +#ifndef MANTID_PARALLEL_PULSETIMEGENERATOR_H_ +#define MANTID_PARALLEL_PULSETIMEGENERATOR_H_ + +#include "MantidParallel/DllConfig.h" +#include "MantidTypes/Core/DateAndTime.h" + +namespace Mantid { +namespace Parallel { +namespace IO { + +/** Generator for 
pulse times based on input from an NXevent_data entry from a + Nexus file. Used to generate a sequence of pulse times for a series of events + by doing a lookup of the event in the event_index field and returning the + corresponding pulse time obtained from event_time_zero combined with the + optional offset parameter. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ + +namespace detail { +constexpr char second[] = "second"; +constexpr char microsecond[] = "microsecond"; +constexpr char nanosecond[] = "nanosecond"; + +template <class TimeZeroType> +double scaleFromUnit( + const std::string &unit, + typename std::enable_if< + std::is_floating_point<TimeZeroType>::value>::type * = nullptr) { + if (unit == second) + return 1.0; + if (unit == microsecond) + return 1e-6; + if (unit == nanosecond) + return 1e-9; + throw std::runtime_error("PulseTimeGenerator: unsupported unit `" + unit + + "` for event_time_zero"); +} + +template <class TimeZeroType> +int64_t scaleFromUnit( + const std::string &unit, + typename std::enable_if<std::is_integral<TimeZeroType>::value>::type * = + nullptr) { + if (unit == nanosecond) + return 1; + throw std::runtime_error("PulseTimeGenerator: unsupported unit `" + unit + + "` for event_time_zero"); +} + +/// Convert any int or float type to corresponding 64 bit type needed for +/// passing into DateAndTime. +template <class T> struct IntOrFloat64Bit { using type = int64_t; }; +template <> struct IntOrFloat64Bit<float> { using type = double; }; +template <> struct IntOrFloat64Bit<double> { using type = double; }; +} + +template <class IndexType, class TimeZeroType> class PulseTimeGenerator { +public: + PulseTimeGenerator() = default; + + /// Constructor based on entries in NXevent_data. + PulseTimeGenerator(std::vector<IndexType> event_index, + std::vector<TimeZeroType> event_time_zero, + const std::string &event_time_zero_unit, + const int64_t event_time_zero_offset) + : m_index(std::move(event_index)), m_timeZero(std::move(event_time_zero)), + m_timeZeroScale( + detail::scaleFromUnit<TimeZeroType>(event_time_zero_unit)), + m_timeZeroOffset(event_time_zero_offset) {} + + /// Seek to given event index. 
+ void seek(const size_t event) { + if (m_index.size() == 0) + throw std::runtime_error("Empty event index in PulseTimeGenerator"); + if (static_cast<IndexType>(event) < m_event) + m_pulse = 0; + m_event = static_cast<IndexType>(event); + for (; m_pulse < m_index.size() - 1; ++m_pulse) + if (m_event < m_index[m_pulse + 1]) + break; + m_pulseTime = getPulseTime(m_timeZeroOffset, m_timeZero[m_pulse]); + } + + /// Return pulse time for next event, and advance. Must call seek() first, at + /// least once. + Types::Core::DateAndTime next() { + while (m_pulse < m_index.size() - 1 && m_event == m_index[m_pulse + 1]) { + ++m_pulse; + m_pulseTime = getPulseTime(m_timeZeroOffset, m_timeZero[m_pulse]); + } + ++m_event; + return m_pulseTime; + } + +private: + Types::Core::DateAndTime getPulseTime(const Types::Core::DateAndTime &offset, + const TimeZeroType &eventTimeZero) { + return offset + + m_timeZeroScale * + static_cast< + typename detail::IntOrFloat64Bit<TimeZeroType>::type>( + eventTimeZero); + } + + IndexType m_event{0}; + size_t m_pulse{0}; + Types::Core::DateAndTime m_pulseTime; + std::vector<IndexType> m_index; + std::vector<TimeZeroType> m_timeZero; + typename detail::IntOrFloat64Bit<TimeZeroType>::type m_timeZeroScale; + Types::Core::DateAndTime m_timeZeroOffset; +}; + +} // namespace IO +} // namespace Parallel +} // namespace Mantid + +#endif /* MANTID_PARALLEL_PULSETIMEGENERATOR_H_ */ diff --git a/Framework/Parallel/inc/MantidParallel/Nonblocking.h b/Framework/Parallel/inc/MantidParallel/Nonblocking.h new file mode 100644 index 0000000000000000000000000000000000000000..a7d1df09fc239e7e0e1310e201006568c4cdeb0d --- /dev/null +++ b/Framework/Parallel/inc/MantidParallel/Nonblocking.h @@ -0,0 +1,63 @@ +#ifndef MANTID_PARALLEL_NONBLOCKING_H_ +#define MANTID_PARALLEL_NONBLOCKING_H_ + +#include "MantidParallel/DllConfig.h" +#include "MantidParallel/Request.h" + +#ifdef MPI_EXPERIMENTAL +#include <boost/mpi/nonblocking.hpp> +#endif + +namespace Mantid { +namespace 
Parallel { + +/** Wrapper for boost::mpi::nonblocking. For non-MPI builds an equivalent + implementation with reduced functionality is provided. + + @author Simon Heybrock + @date 2017 + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +template <typename ForwardIterator> +void wait_all(ForwardIterator begin, ForwardIterator end) { +#ifdef MPI_EXPERIMENTAL + class RequestIteratorWrapper : public ForwardIterator { + public: + RequestIteratorWrapper(const ForwardIterator &it) : ForwardIterator(it) {} + boost::mpi::request &operator*() { return ForwardIterator::operator*(); } + boost::mpi::request *operator->() { return &operator*(); } + }; + if (begin == end || !begin->hasBackend()) + return boost::mpi::wait_all(RequestIteratorWrapper(begin), + RequestIteratorWrapper(end)); +#endif + while (begin != end) { + (*begin).wait(); + ++begin; + } +} + +} // namespace Parallel +} // namespace Mantid + +#endif /* MANTID_PARALLEL_NONBLOCKING_H_ */ diff --git a/Framework/Parallel/inc/MantidParallel/Request.h b/Framework/Parallel/inc/MantidParallel/Request.h index 
7fc1bdad5af5f73efcdea321747aaeaf6c4b4e6e..f167d2c84240468b0edc3eb444ea47b3be4a6dd3 100644 --- a/Framework/Parallel/inc/MantidParallel/Request.h +++ b/Framework/Parallel/inc/MantidParallel/Request.h @@ -50,6 +50,12 @@ public: void wait(); + bool hasBackend() const { return m_threadingBackend; } + +#ifdef MPI_EXPERIMENTAL + operator boost::mpi::request &() { return m_request; } +#endif + private: template <class Function> explicit Request(Function &&f); #ifdef MPI_EXPERIMENTAL diff --git a/Framework/Parallel/inc/MantidParallel/ThreadingBackend.h b/Framework/Parallel/inc/MantidParallel/ThreadingBackend.h index 0096ab173cd1a0ca1aed622d5a8d998048cd5892..a4c10f2d6f7430f6d024c1b3c53dd8844fffda03 100644 --- a/Framework/Parallel/inc/MantidParallel/ThreadingBackend.h +++ b/Framework/Parallel/inc/MantidParallel/ThreadingBackend.h @@ -8,6 +8,7 @@ #include <boost/archive/binary_oarchive.hpp> #include <boost/archive/binary_iarchive.hpp> +#include <chrono> #include <functional> #include <istream> #include <map> @@ -67,8 +68,9 @@ public: template <typename... T> Request isend(int source, int dest, int tag, T &&... args); - template <typename... T> - Request irecv(int dest, int source, int tag, T &&... 
args); + template <typename T> Request irecv(int dest, int source, int tag, T &&data); + template <typename T> + Request irecv(int dest, int source, int tag, T *data, const size_t count); private: int m_size{1}; @@ -77,6 +79,38 @@ private: std::mutex m_mutex; }; +namespace detail { +template <class T> +void saveToStream(boost::archive::binary_oarchive &oa, const T &data) { + oa.operator<<(data); +} +template <class T> +void saveToStream(boost::archive::binary_oarchive &oa, + const std::vector<T> &data) { + oa.operator<<(data); +} +template <class T> +void saveToStream(boost::archive::binary_oarchive &oa, const T *data, + const size_t count) { + for (size_t i = 0; i < count; ++i) + oa.operator<<(data[i]); +} +template <class T> +void loadFromStream(boost::archive::binary_iarchive &ia, T &data) { + ia.operator>>(data); +} +template <class T> +void loadFromStream(boost::archive::binary_iarchive &ia, std::vector<T> &data) { + ia.operator>>(data); +} +template <class T> +void loadFromStream(boost::archive::binary_iarchive &ia, T *data, + const size_t count) { + for (size_t i = 0; i < count; ++i) + ia.operator>>(data[i]); +} +} + template <typename... T> void ThreadingBackend::send(int source, int dest, int tag, T &&... args) { // Must wrap std::stringbuf in a unique_ptr since gcc on RHEL7 does not @@ -90,7 +124,7 @@ void ThreadingBackend::send(int source, int dest, int tag, T &&... args) { // though, since it is *not* writing to the buffer, somehow the oarchive // destructor must be doing something that requires the buffer. boost::archive::binary_oarchive oa(os); - oa.operator<<(std::forward<T>(args)...); + detail::saveToStream(oa, std::forward<T>(args)...); } std::lock_guard<std::mutex> lock(m_mutex); m_buffer[std::make_tuple(source, dest, tag)].push_back(std::move(buf)); @@ -101,6 +135,9 @@ void ThreadingBackend::recv(int dest, int source, int tag, T &&... 
args) { const auto key = std::make_tuple(source, dest, tag); std::unique_ptr<std::stringbuf> buf; while (true) { + // Sleep to reduce lock contention. Without this execution times can grow + // enormously on Windows. + std::this_thread::sleep_for(std::chrono::microseconds(10)); std::lock_guard<std::mutex> lock(m_mutex); auto it = m_buffer.find(key); if (it == m_buffer.end()) @@ -114,7 +151,7 @@ void ThreadingBackend::recv(int dest, int source, int tag, T &&... args) { } std::istream is(buf.get()); boost::archive::binary_iarchive ia(is); - ia.operator>>(std::forward<T>(args)...); + detail::loadFromStream(ia, std::forward<T>(args)...); } template <typename... T> @@ -123,10 +160,18 @@ Request ThreadingBackend::isend(int source, int dest, int tag, T &&... args) { return Request{}; } -template <typename... T> -Request ThreadingBackend::irecv(int dest, int source, int tag, T &&... args) { - return Request(std::bind(&ThreadingBackend::recv<T...>, this, dest, source, - tag, std::ref(std::forward<T>(args)...))); +template <typename T> +Request ThreadingBackend::irecv(int dest, int source, int tag, T &&data) { + return Request(std::bind(&ThreadingBackend::recv<T>, this, dest, source, tag, + std::ref(std::forward<T>(data)))); +} +template <typename T> +Request ThreadingBackend::irecv(int dest, int source, int tag, T *data, + const size_t count) { + // Pass (pointer) by value since reference to it may go out of scope. 
+ return Request([this, dest, source, tag, data, count]() mutable { + recv(dest, source, tag, data, count); + }); } } // namespace detail diff --git a/Framework/Parallel/src/IO/Chunker.cpp b/Framework/Parallel/src/IO/Chunker.cpp new file mode 100644 index 0000000000000000000000000000000000000000..14d9b05c68a88c9a3a13f7684835abc85ce23248 --- /dev/null +++ b/Framework/Parallel/src/IO/Chunker.cpp @@ -0,0 +1,249 @@ +#include <numeric> + +#include "MantidParallel/Communicator.h" +#include "MantidParallel/IO/Chunker.h" + +namespace Mantid { +namespace Parallel { +namespace IO { + +namespace { +/** Helper to build partition (subgroup of workers with subgroup of banks). + * + * Elements of `sortedSizes` are <size, original index, done flag>. The + * `padding` argument is used to artificially increase the amount of work + * assigned to each group. This is used to deal with cases where + *`buildPartition` generates more groups than available workers. */ +std::pair<int, std::vector<size_t>> +buildPartition(const int totalWorkers, const size_t totalSize, + std::vector<std::tuple<size_t, size_t, bool>> &sortedSizes, + const size_t padding) { + const size_t perWorkerSize = (totalSize + totalWorkers - 1) / totalWorkers; + + // 1. Find largest unprocessed item + auto largest = + std::find_if_not(sortedSizes.begin(), sortedSizes.end(), + [](const std::tuple<size_t, size_t, bool> &item) { + return std::get<2>(item); + }); + std::vector<size_t> itemsInPartition{std::get<1>(*largest)}; + std::get<2>(*largest) = true; + + // 2. Number of workers needed for that item. + const size_t size = std::get<0>(*largest); + const int workers = + totalSize != 0 + ? static_cast<int>( + (static_cast<size_t>(totalWorkers) * size + totalSize - 1) / + totalSize) + : totalWorkers; + size_t remainder = workers * perWorkerSize - size + padding; + + // 3. 
Fill remainder with next largest fitting size(s) + for (auto &item : sortedSizes) { + if (std::get<2>(item)) + continue; + if (std::get<0>(item) <= remainder) { + std::get<2>(item) = true; + itemsInPartition.push_back(std::get<1>(item)); + remainder -= std::get<0>(item); + } + } + return {workers, itemsInPartition}; +} + +int numberOfWorkers( + const std::vector<std::pair<int, std::vector<size_t>>> &partitioning) { + int workers = 0; + for (const auto &item : partitioning) + workers += std::get<0>(item); + return workers; +} + +size_t taskSize(const std::pair<int, std::vector<size_t>> &partition, + const std::vector<size_t> &tasks) { + const int workers = std::get<0>(partition); + if (workers == 0) + return UINT64_MAX; + const auto &indices = std::get<1>(partition); + size_t total = 0; + for (const auto index : indices) + total += tasks[index]; + // Rounding *up*. Some workers in partition may have less work but we want + // the maximum. + return (total + workers - 1) / workers; +} +} + +/** Create a chunker based on bank sizes and chunk size. + * + * The `bankSizes` define the items of work to be split up amongst the workers. + * This is done using the given `chunkSize`, i.e., each bank size is cut into + * pieces of size `chunkSize` and all pieces are assigned to the requested + * number of workers. */ +Chunker::Chunker(const int numWorkers, const int worker, + const std::vector<size_t> &bankSizes, const size_t chunkSize) + : m_worker(worker), m_chunkSize(chunkSize), m_bankSizes(bankSizes) { + // Create partitions based on chunk counts. 
+ m_chunkCounts = m_bankSizes; + const auto sizeToChunkCount = + [&](size_t &value) { value = (value + m_chunkSize - 1) / m_chunkSize; }; + std::for_each(m_chunkCounts.begin(), m_chunkCounts.end(), sizeToChunkCount); + m_partitioning = makeBalancedPartitioning(numWorkers, m_chunkCounts); +} + +size_t Chunker::chunkSize() const { return m_chunkSize; } + +std::vector<std::vector<int>> Chunker::makeWorkerGroups() const { + int worker{0}; + std::vector<std::vector<int>> workerGroups; + for (const auto &partition : m_partitioning) { + workerGroups.emplace_back(); + for (int i = 0; i < partition.first; ++i) + workerGroups.back().push_back(worker++); + } + return workerGroups; +} + +/** Returns a vector of LoadRanges based on parameters passed to the + * constructor. + * + * The ranges are optimized such that the number of workers per bank is + * minimized while at the same time achieving good load balance by making the + * number of chunks to be loaded by each worker as equal as possible. The + * current algorithm does not find the optimal solution for all edge cases but + * should usually yield a 'good-enough' approximation. There are two reasons for + * minimizing the number of workers per bank: + * 1. Avoid overhead from loading event_index and event_time_zero for a bank on + * more workers than necessary. + * 2. Reduce the number of banks a worker is loading from to allow more flexible + * ordering when redistributing data with MPI in the loader. + * If more than one worker is used to load a subset of banks, chunks are + * assigned in a round-robin fashion to workers. This is not reset when reaching + * the end of a bank, i.e., the worker loading the first chunk of the banks in a + * subset is *not* guaranteed to be the same for all banks. */ +std::vector<Chunker::LoadRange> Chunker::makeLoadRanges() const { + // Find our partition. 
+ size_t partitionIndex = 0; + int firstWorkerSharingOurPartition = 0; + for (; partitionIndex < m_partitioning.size(); ++partitionIndex) { + const int workersInPartition = m_partitioning[partitionIndex].first; + if (firstWorkerSharingOurPartition + workersInPartition > m_worker) + break; + firstWorkerSharingOurPartition += workersInPartition; + } + const auto workersSharingOurPartition = m_partitioning[partitionIndex].first; + const auto &ourBanks = m_partitioning[partitionIndex].second; + + // Assign all chunks from all banks in this partition to workers in + // round-robin manner. + int64_t chunk = 0; + std::vector<LoadRange> ranges; + for (const auto bank : ourBanks) { + size_t current = 0; + while (current < m_bankSizes[bank]) { + if (chunk % workersSharingOurPartition == + (m_worker - firstWorkerSharingOurPartition)) { + size_t count = + std::min(current + m_chunkSize, m_bankSizes[bank]) - current; + ranges.push_back(LoadRange{bank, current, count}); + } + current += m_chunkSize; + chunk++; + } + } + + // Compute maximum chunk count (on any worker). + int64_t maxChunkCount = 0; + for (const auto &partition : m_partitioning) { + size_t chunksInPartition = 0; + for (const auto bank : partition.second) + chunksInPartition += m_chunkCounts[bank]; + int workersInPartition = partition.first; + int64_t maxChunkCountInPartition = + (chunksInPartition + workersInPartition - 1) / workersInPartition; + maxChunkCount = std::max(maxChunkCount, maxChunkCountInPartition); + } + ranges.resize(maxChunkCount); + + return ranges; +} + +/** Returns a vector of partitions of work (sizes) for given number of workers. + * + * The `sizes` argument defines the amount of work for a series of tasks. Here + * the task would be loading/processing a certain number of chunks from a file. + * The `workers` argument gives the number of workers to be used to process all + * tasks. The returned partitioning fulfils the following: + * - A task may be shared among workers. 
If workers share a task, all tasks they + * are working on are shared among that group of workers. + * - Groups of workers and distribution of tasks to groups tries to balance + * work, such that each worker has a roughly similar amount of work. + * Note that this method is public and static to allow for testing. */ +std::vector<std::pair<int, std::vector<size_t>>> +Chunker::makeBalancedPartitioning(const int workers, + const std::vector<size_t> &sizes) { + const auto totalSize = + std::accumulate(sizes.begin(), sizes.end(), static_cast<size_t>(0)); + // Indexed size vector such that we can sort it but still know original index. + // Elements are <size, original index, done flag> + std::vector<std::tuple<size_t, size_t, bool>> sortedSizes; + for (const auto size : sizes) + sortedSizes.emplace_back(size, sortedSizes.size(), false); + std::sort(sortedSizes.begin(), sortedSizes.end(), + [](const std::tuple<size_t, size_t, bool> &a, + const std::tuple<size_t, size_t, bool> &b) { + return std::get<0>(a) > std::get<0>(b); + }); + + std::vector<std::pair<int, std::vector<size_t>>> partitioning; + size_t numProcessed = 0; + size_t padding = 0; + const auto originalSortedSizes(sortedSizes); + while (numProcessed != sizes.size()) { + partitioning.emplace_back( + buildPartition(workers, totalSize, sortedSizes, padding)); + numProcessed += partitioning.back().second.size(); + if (static_cast<int>(partitioning.size()) > workers) { + partitioning.clear(); + numProcessed = 0; + padding += static_cast<size_t>( + std::max(1.0, static_cast<double>(totalSize) * 0.01)); + sortedSizes = originalSortedSizes; + } + } + + // buildPartition always rounds up when computing needed workers, so we have + // to reduce workers for some partitions such that we stay below given total + // workers. 
+ int tooMany = numberOfWorkers(partitioning) - workers; + if (tooMany != 0) { + for (auto &item : partitioning) + std::get<0>(item)--; + std::vector<size_t> taskSizes; + for (const auto &partition : partitioning) + taskSizes.push_back(taskSize(partition, sizes)); + for (int i = 0; i < tooMany; ++i) { + const auto itemWithSmallestIncrease = + std::distance(taskSizes.begin(), + std::min_element(taskSizes.begin(), taskSizes.end())); + std::get<0>(partitioning[itemWithSmallestIncrease])--; + taskSizes[itemWithSmallestIncrease] = + taskSize(partitioning[itemWithSmallestIncrease], sizes); + } + for (auto &item : partitioning) + std::get<0>(item)++; + } + + // In some cases there are also unused workers, assign them such that client + // code has consistent partitioning. + int tooFew = workers - numberOfWorkers(partitioning); + if (tooFew != 0) + partitioning.push_back({tooFew, {}}); + + return partitioning; +} + +} // namespace IO +} // namespace Parallel +} // namespace Mantid diff --git a/Framework/Parallel/src/IO/EventLoader.cpp b/Framework/Parallel/src/IO/EventLoader.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b6c161dcc4efb8c2b37d013d6a65dc6d497686be --- /dev/null +++ b/Framework/Parallel/src/IO/EventLoader.cpp @@ -0,0 +1,51 @@ +#include "MantidParallel/IO/EventLoader.h" +#include "MantidParallel/IO/EventLoaderHelpers.h" +#include "MantidParallel/IO/NXEventDataLoader.h" + +#include <H5Cpp.h> + +namespace Mantid { +namespace Parallel { +namespace IO { +namespace EventLoader { + +/** Return a map from any one event ID in a bank to the bank index. + * + * For every bank there is one map entry, i.e., this is NOT a mapping from all + * IDs in a bank to the bank. The returned map will not contain an entry for + * banks that contain no events. 
*/ +std::unordered_map<int32_t, size_t> +makeAnyEventIdToBankMap(const std::string &filename, + const std::string &groupName, + const std::vector<std::string> &bankNames) { + std::unordered_map<int32_t, size_t> idToBank; + H5::H5File file(filename, H5F_ACC_RDONLY); + H5::Group group = file.openGroup(groupName); + for (size_t i = 0; i < bankNames.size(); ++i) { + try { + int32_t eventId; + detail::read<int32_t>(&eventId, group, bankNames[i] + "/event_id", 0, 1); + idToBank[eventId] = i; + } catch (const std::out_of_range &) { + // No event in file, do not add to map. + } + } + return idToBank; +} + +/// Load events from given banks into event lists. +void load(const Communicator &comm, const std::string &filename, + const std::string &groupName, + const std::vector<std::string> &bankNames, + const std::vector<int32_t> &bankOffsets, + std::vector<std::vector<Types::Event::TofEvent> *> eventLists) { + H5::H5File file(filename, H5F_ACC_RDONLY); + H5::Group group = file.openGroup(groupName); + load(readDataType(group, bankNames, "event_time_offset"), comm, group, + bankNames, bankOffsets, std::move(eventLists)); +} +} + +} // namespace IO +} // namespace Parallel +} // namespace Mantid diff --git a/Framework/Parallel/src/IO/EventParser.cpp b/Framework/Parallel/src/IO/EventParser.cpp new file mode 100644 index 0000000000000000000000000000000000000000..70f00e1f5657edb6fb0e87a620fb0a2e888a6496 --- /dev/null +++ b/Framework/Parallel/src/IO/EventParser.cpp @@ -0,0 +1,27 @@ +#include "MantidParallel/IO/EventParser.h" + +namespace Mantid { +namespace Parallel { +namespace IO { +namespace detail { + +/** Transform event IDs to global spectrum numbers using the bankOffsets stored + * at object creation. + * + * The transformation is in-place to save memory bandwidth and modifies the + * range pointed to by `event_id_start`. + * @param event_id_start Starting position of chunk of data containing event + * IDs. 
+ * @param count Number of items in data chunk + * @param bankOffset Offset to subtract from the array `event_id_start`. + */ +void eventIdToGlobalSpectrumIndex(int32_t *event_id_start, size_t count, + const int32_t bankOffset) { + for (size_t i = 0; i < count; ++i) + event_id_start[i] -= bankOffset; +} + +} // namespace detail +} // namespace IO +} // namespace Parallel +} // namespace Mantid diff --git a/Framework/Parallel/test/CMakeLists.txt b/Framework/Parallel/test/CMakeLists.txt index 7a061b3d6dbd8dd065f876f314d2caf37512eca6..a6eceff029e1c4d593486f4f5317ef446af683e4 100644 --- a/Framework/Parallel/test/CMakeLists.txt +++ b/Framework/Parallel/test/CMakeLists.txt @@ -8,10 +8,13 @@ if ( CXXTEST_FOUND ) ) cxxtest_add_test ( ParallelTest ${TEST_FILES} ${GMOCK_TEST_FILES}) + target_include_directories ( ParallelTest SYSTEM PRIVATE ${HDF5_INCLUDE_DIRS} ) target_link_libraries( ParallelTest LINK_PRIVATE ${TCMALLOC_LIBRARIES_LINKTIME} ${MANTIDLIBS} Parallel ${GMOCK_LIBRARIES} - ${GTEST_LIBRARIES} ) + ${GTEST_LIBRARIES} + ${HDF5_LIBRARIES} + ) add_dependencies ( FrameworkTests ParallelTest ) # Add to the 'FrameworkTests' group in VS diff --git a/Framework/Parallel/test/ChunkerTest.h b/Framework/Parallel/test/ChunkerTest.h new file mode 100644 index 0000000000000000000000000000000000000000..5794288a9684bf27a03e071dfb3e21a3e5ea1f07 --- /dev/null +++ b/Framework/Parallel/test/ChunkerTest.h @@ -0,0 +1,363 @@ +#ifndef MANTID_PARALLEL_CHUNKERTEST_H_ +#define MANTID_PARALLEL_CHUNKERTEST_H_ + +#include <cxxtest/TestSuite.h> + +#include <algorithm> +#include <numeric> + +#include "MantidParallel/IO/Chunker.h" + +using namespace Mantid::Parallel::IO; + +namespace Mantid { +namespace Parallel { +namespace IO { +bool operator==(const Chunker::LoadRange &a, const Chunker::LoadRange &b) { + return a.bankIndex == b.bankIndex && a.eventOffset == b.eventOffset && + a.eventCount == b.eventCount; +} +} +} +} + +class ChunkerTest : public CxxTest::TestSuite { +public: + // This pair of 
boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static ChunkerTest *createSuite() { return new ChunkerTest(); } + static void destroySuite(ChunkerTest *suite) { delete suite; } + + void test_chunkSize() { + const size_t chunkSize = 17; + const Chunker chunker(1, 0, {}, chunkSize); + TS_ASSERT_EQUALS(chunker.chunkSize(), chunkSize); + } + + void test_makeWorkerGroups_4_ranks() { + const int ranks = 4; + const int rank = 1; + const std::vector<size_t> bankSizes{6, 1, 4, 2}; + const size_t chunkSize = 2; + const Chunker chunker(ranks, rank, bankSizes, chunkSize); + const auto &groups = chunker.makeWorkerGroups(); + TS_ASSERT_EQUALS(groups.size(), 2); + TS_ASSERT_EQUALS(groups[0][0], 0); + TS_ASSERT_EQUALS(groups[0][1], 1); + TS_ASSERT_EQUALS(groups[1][0], 2); + TS_ASSERT_EQUALS(groups[1][1], 3); + } + + void test_makeWorkerGroups_4_ranks_different_group_sizes() { + const int ranks = 4; + const int rank = 1; + const std::vector<size_t> bankSizes{9, 1, 1, 1}; + const size_t chunkSize = 2; + const Chunker chunker(ranks, rank, bankSizes, chunkSize); + const auto &groups = chunker.makeWorkerGroups(); + TS_ASSERT_EQUALS(groups.size(), 2); + TS_ASSERT_EQUALS(groups[0][0], 0); + TS_ASSERT_EQUALS(groups[0][1], 1); + TS_ASSERT_EQUALS(groups[0][2], 2); + TS_ASSERT_EQUALS(groups[1][0], 3); + } + + void test_makeRankGroups_4_ranks_zero_size_bank() { + const int ranks = 4; + const int rank = 1; + const std::vector<size_t> bankSizes{9, 0, 1, 1}; + const size_t chunkSize = 2; + const Chunker chunker(ranks, rank, bankSizes, chunkSize); + const auto &groups = chunker.makeWorkerGroups(); + TS_ASSERT_EQUALS(groups.size(), 2); + TS_ASSERT_EQUALS(groups[0].size(), 3); + TS_ASSERT_EQUALS(groups[1].size(), 1); + TS_ASSERT_EQUALS(groups[0][0], 0); + TS_ASSERT_EQUALS(groups[0][1], 1); + // This should be the size-zero bank. 
It is currently added to the first + // group, but in principle this could be changed. + TS_ASSERT_EQUALS(groups[0][2], 2); + TS_ASSERT_EQUALS(groups[1][0], 3); + } + + void test_makeLoadRanges_1_rank() { + const int ranks = 1; + const int rank = 0; + const std::vector<size_t> bankSizes{7, 2, 4, 1}; + const size_t chunkSize = 2; + const Chunker chunker(ranks, rank, bankSizes, chunkSize); + const auto ranges = chunker.makeLoadRanges(); + TS_ASSERT_EQUALS(ranges.size(), 1 + 1 + 2 + 4); + size_t bank = 0; + TS_ASSERT_EQUALS(ranges[0], (Chunker::LoadRange{bank, 0, 2})); + TS_ASSERT_EQUALS(ranges[1], (Chunker::LoadRange{bank, 2, 2})); + TS_ASSERT_EQUALS(ranges[2], (Chunker::LoadRange{bank, 4, 2})); + TS_ASSERT_EQUALS(ranges[3], (Chunker::LoadRange{bank, 6, 1})); + bank = 2; + TS_ASSERT_EQUALS(ranges[4], (Chunker::LoadRange{bank, 0, 2})); + TS_ASSERT_EQUALS(ranges[5], (Chunker::LoadRange{bank, 2, 2})); + bank = 1; + TS_ASSERT_EQUALS(ranges[6], (Chunker::LoadRange{bank, 0, 2})); + bank = 3; + TS_ASSERT_EQUALS(ranges[7], (Chunker::LoadRange{bank, 0, 1})); + } + + void test_makeLoadRanges_zero_size_bank() { + const int ranks = 1; + const int rank = 0; + const std::vector<size_t> bankSizes{7, 0, 4, 1}; + const size_t chunkSize = 2; + const Chunker chunker(ranks, rank, bankSizes, chunkSize); + const auto ranges = chunker.makeLoadRanges(); + TS_ASSERT_EQUALS(ranges.size(), 4 + 0 + 2 + 1); + size_t bank = 0; + TS_ASSERT_EQUALS(ranges[0], (Chunker::LoadRange{bank, 0, 2})); + TS_ASSERT_EQUALS(ranges[1], (Chunker::LoadRange{bank, 2, 2})); + TS_ASSERT_EQUALS(ranges[2], (Chunker::LoadRange{bank, 4, 2})); + TS_ASSERT_EQUALS(ranges[3], (Chunker::LoadRange{bank, 6, 1})); + bank = 2; + TS_ASSERT_EQUALS(ranges[4], (Chunker::LoadRange{bank, 0, 2})); + TS_ASSERT_EQUALS(ranges[5], (Chunker::LoadRange{bank, 2, 2})); + bank = 3; + TS_ASSERT_EQUALS(ranges[6], (Chunker::LoadRange{bank, 0, 1})); + // Note: No entry for bank = 1. 
+ } + + void test_makeLoadRanges_2_ranks_rank0() { + const int ranks = 2; + const int rank = 0; + const std::vector<size_t> bankSizes{6, 1, 4, 2}; + const size_t chunkSize = 2; + const Chunker chunker(ranks, rank, bankSizes, chunkSize); + const auto ranges = chunker.makeLoadRanges(); + TS_ASSERT_EQUALS(ranges.size(), 4); + size_t bank = 0; + TS_ASSERT_EQUALS(ranges[0], (Chunker::LoadRange{bank, 0, 2})); + TS_ASSERT_EQUALS(ranges[1], (Chunker::LoadRange{bank, 2, 2})); + TS_ASSERT_EQUALS(ranges[2], (Chunker::LoadRange{bank, 4, 2})); + bank = 1; + // Note that bank is not 3, which would be the next largest fitting into the + // partition, but internally math is done based on chunks so 2 == 1. + TS_ASSERT_EQUALS(ranges[3], (Chunker::LoadRange{bank, 0, 1})); + } + + void test_makeLoadRanges_2_ranks_rank1() { + const int ranks = 2; + const int rank = 1; + const std::vector<size_t> bankSizes{6, 1, 4, 2}; + const size_t chunkSize = 2; + const Chunker chunker(ranks, rank, bankSizes, chunkSize); + const auto ranges = chunker.makeLoadRanges(); + TS_ASSERT_EQUALS(ranges.size(), 4); + size_t bank = 2; + TS_ASSERT_EQUALS(ranges[0], (Chunker::LoadRange{bank, 0, 2})); + TS_ASSERT_EQUALS(ranges[1], (Chunker::LoadRange{bank, 2, 2})); + bank = 3; + TS_ASSERT_EQUALS(ranges[2], (Chunker::LoadRange{bank, 0, 2})); + // Last range is padding (size 0) + bank = 0; + TS_ASSERT_EQUALS(ranges[3], (Chunker::LoadRange{bank, 0, 0})); + } + + void test_makeLoadRanges_4_ranks_rank1() { + const int ranks = 4; + const int rank = 1; + const std::vector<size_t> bankSizes{6, 1, 4, 2}; + const size_t chunkSize = 2; + const Chunker chunker(ranks, rank, bankSizes, chunkSize); + const auto ranges = chunker.makeLoadRanges(); + TS_ASSERT_EQUALS(ranges.size(), 2); + size_t bank = 0; + TS_ASSERT_EQUALS(ranges[0], (Chunker::LoadRange{bank, 2, 2})); + bank = 1; + TS_ASSERT_EQUALS(ranges[1], (Chunker::LoadRange{bank, 0, 1})); + } + + void test_makeLoadRange_many_random_banks() { + for (int workers = 1; workers < 
100; ++workers) { + for (int worker = 0; worker < workers; ++worker) { + // The following bank sizes come from actual files which have caused + trouble so this also serves as a regression test. + for (const auto &bankSizes : + {std::vector<size_t>{2091281, 520340, 841355, 912704, 1435110, + 567885, 1850044, 1333453, 1507522, 1396560, + 1699092, 1484645, 515805, 474417, 633111, + 600780, 638784, 572031, 741562, 593741, + 546107, 552800, 556607}, + std::vector<size_t>{ + 5158050, 5566070, 5528000, 5461070, 5937410, 7415620, 5720310, + 6387840, 6007800, 6331110, 4744170, 20912810, 14846450, + 16990920, 13965600, 15075220, 13334530, 18500440, 5678850, + 14351100, 9127040, 8413550, 5203400}}) { + const size_t chunkSize = 1024 * 1024; + TS_ASSERT_THROWS_NOTHING( + Chunker chunker(workers, worker, bankSizes, chunkSize)); + Chunker chunker(workers, worker, bankSizes, chunkSize); + TS_ASSERT_THROWS_NOTHING(chunker.makeLoadRanges()); + } + } + } + } + + void test_makeBalancedPartitioning_1_worker() { + const size_t workers = 1; + const std::vector<size_t> sizes{7, 1, 3}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + TS_ASSERT_EQUALS(result.size(), 1); + TS_ASSERT_EQUALS(result[0].first, 1); + TS_ASSERT_EQUALS(result[0].second, (std::vector<size_t>{0, 2, 1})); + } + + void test_makeBalancedPartitioning_2_workers_striping() { + const size_t workers = 2; + const std::vector<size_t> sizes{7, 1, 3}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + // Largest size is more than 50% of total, so striping is used + TS_ASSERT_EQUALS(result.size(), 1); + TS_ASSERT_EQUALS(result[0].first, 2); + TS_ASSERT_EQUALS(result[0].second, (std::vector<size_t>{0, 2, 1})); + } + + void test_makeBalancedPartitioning_2_workers_no_striping() { + const size_t workers = 2; + const std::vector<size_t> sizes{7, 1, 6}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + // Largest size is equal to 50% of total, i.e., no
striping necessary + TS_ASSERT_EQUALS(result.size(), 2); + TS_ASSERT_EQUALS(result[0].first, 1); + TS_ASSERT_EQUALS(result[0].second, (std::vector<size_t>{0})); + TS_ASSERT_EQUALS(result[1].first, 1); + TS_ASSERT_EQUALS(result[1].second, (std::vector<size_t>{2, 1})); + } + + void test_makeBalancedPartitioning_2_workers_tied_sizes() { + const size_t workers = 2; + const std::vector<size_t> sizes{7, 1, 7}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + TS_ASSERT_EQUALS(result.size(), 2); + TS_ASSERT_EQUALS(result[0].first, 1); + TS_ASSERT_EQUALS(result[0].second, (std::vector<size_t>{0, 1})); + TS_ASSERT_EQUALS(result[1].first, 1); + TS_ASSERT_EQUALS(result[1].second, (std::vector<size_t>{2})); + } + + void test_makeBalancedPartitioning_3_workers_striping() { + const size_t workers = 3; + const std::vector<size_t> sizes{9, 1, 3}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + // Largest size is more than 2/3 of total, so striping is used + TS_ASSERT_EQUALS(result.size(), 1); + TS_ASSERT_EQUALS(result[0].first, 3); + TS_ASSERT_EQUALS(result[0].second, (std::vector<size_t>{0, 2, 1})); + } + + void test_makeBalancedPartitioning_3_workers_partial_striping() { + const size_t workers = 3; + const std::vector<size_t> sizes{8, 1, 3}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + // Largest size is 2/3 of total, so striping for largest, no striping for + // others. 
+ TS_ASSERT_EQUALS(result.size(), 2); + TS_ASSERT_EQUALS(result[0].first, 2); + TS_ASSERT_EQUALS(result[0].second, (std::vector<size_t>{0})); + TS_ASSERT_EQUALS(result[1].first, 1); + TS_ASSERT_EQUALS(result[1].second, (std::vector<size_t>{2, 1})); + } + + void test_makeBalancedPartitioning_4_workers_striping() { + const size_t workers = 4; + const std::vector<size_t> sizes{13, 1, 3}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + // Largest size is more than 3/4 of total, so striping is used + TS_ASSERT_EQUALS(result.size(), 1); + TS_ASSERT_EQUALS(result[0].first, 4); + TS_ASSERT_EQUALS(result[0].second, (std::vector<size_t>{0, 2, 1})); + } + + void test_makeBalancedPartitioning_4_workers_partial_striping() { + const size_t workers = 4; + const std::vector<size_t> sizes{12, 1, 3}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + // Largest size is 3/4 of total, so striping for largest, no striping for + // others. + TS_ASSERT_EQUALS(result.size(), 2); + TS_ASSERT_EQUALS(result[0].first, 3); + TS_ASSERT_EQUALS(result[0].second, (std::vector<size_t>{0})); + TS_ASSERT_EQUALS(result[1].first, 1); + TS_ASSERT_EQUALS(result[1].second, (std::vector<size_t>{2, 1})); + } + + void test_makeBalancedPartitioning_4_workers_partial_independent_striping() { + const size_t workers = 4; + const std::vector<size_t> sizes{4, 1, 3}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + // Largest size is 2/4 of total, so striping for largest with half of + // workers and striping for others with the other half. 
+ TS_ASSERT_EQUALS(result.size(), 2); + TS_ASSERT_EQUALS(result[0].first, 2); + TS_ASSERT_EQUALS(result[0].second, (std::vector<size_t>{0})); + TS_ASSERT_EQUALS(result[1].first, 2); + TS_ASSERT_EQUALS(result[1].second, (std::vector<size_t>{2, 1})); + } + + void test_large_and_small_banks_with_many_ranks() { + for (int workers = 1; workers < 100; ++workers) { + const std::vector<size_t> sizes{1234, 5678, 17, 3, 555}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + + // Maximum work a single worker has to do + size_t maxWork = 0; + for (const auto &item : result) { + size_t size = 0; + for (const auto bank : item.second) + size += sizes[bank]; + const size_t work = (size + item.first - 1) / item.first; + maxWork = std::max(maxWork, work); + } + + const size_t totalWork = + std::accumulate(sizes.begin(), sizes.end(), static_cast<size_t>(0)); + const size_t wastedWork = maxWork * workers - totalWork; + + // Fuzzy test to ensure that imbalance is not too large. These are by no + means hard limits and may be subject to change. Current limit is: At + most 30% and 3 of the workers may be `wasted` (whichever is less).
+ TS_ASSERT(static_cast<double>(wastedWork) / + static_cast<double>(totalWork) < + std::min(0.3, 3.0 / workers)); + } + } + + void test_several_small_banks() { + const int workers = 2; + for (size_t banks = 2; banks < 10; ++banks) { + const std::vector<size_t> sizes(banks, 1); + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + TS_ASSERT_EQUALS(result.size(), workers); + TS_ASSERT_EQUALS(result[0].first, 1); + TS_ASSERT_EQUALS(result[1].first, 1); + TS_ASSERT_EQUALS(result[0].second.size(), + (banks + workers - 1) / workers); + TS_ASSERT_EQUALS(result[1].second.size(), banks / workers); + } + } + + void test_makeBalancedPartitioning_zero_size_bank() { + const size_t workers = 2; + const std::vector<size_t> sizes{5, 0, 3}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + TS_ASSERT_EQUALS(result.size(), 1); + TS_ASSERT_EQUALS(result[0].first, 2); + TS_ASSERT_EQUALS(result[0].second, (std::vector<size_t>{0, 2, 1})); + } + + void test_makeBalancedPartitioning_all_banks_empty() { + const size_t workers = 2; + const std::vector<size_t> sizes{0, 0, 0}; + const auto result = Chunker::makeBalancedPartitioning(workers, sizes); + TS_ASSERT_EQUALS(result.size(), 1); + TS_ASSERT_EQUALS(result[0].first, 2); + } +}; + +#endif /* MANTID_PARALLEL_CHUNKERTEST_H_ */ diff --git a/Framework/Parallel/test/CollectivesTest.h b/Framework/Parallel/test/CollectivesTest.h index 35bf7231eeeec9b1e0324bf9aef2fc784ef81187..3bbe4c061b35a7b9ee45e6bd6dd94bcad3f4ca82 100644 --- a/Framework/Parallel/test/CollectivesTest.h +++ b/Framework/Parallel/test/CollectivesTest.h @@ -11,7 +11,7 @@ using namespace Parallel; namespace { void run_gather(const Communicator &comm) { - int root = 2; + int root = std::min(comm.size() - 1, 2); int value = 123 * comm.rank(); std::vector<int> result; TS_ASSERT_THROWS_NOTHING(Parallel::gather(comm, value, result, root)); @@ -26,7 +26,7 @@ void run_gather(const Communicator &comm) { } void run_gather_short_version(const 
Communicator &comm) { - int root = 2; + int root = std::min(comm.size() - 1, 2); int value = 123 * comm.rank(); if (comm.rank() == root) { std::vector<int> result; @@ -39,6 +39,18 @@ void run_gather_short_version(const Communicator &comm) { TS_ASSERT_THROWS_NOTHING(Parallel::gather(comm, value, root)); } } + +void run_all_to_all(const Communicator &comm) { + std::vector<int> data; + for (int rank = 0; rank < comm.size(); ++rank) + data.emplace_back(1000 * comm.rank() + rank); + std::vector<int> result; + TS_ASSERT_THROWS_NOTHING(Parallel::all_to_all(comm, data, result)); + TS_ASSERT_EQUALS(result.size(), comm.size()); + for (int i = 0; i < comm.size(); ++i) { + TS_ASSERT_EQUALS(result[i], 1000 * i + comm.rank()); + } +} } class CollectivesTest : public CxxTest::TestSuite { @@ -53,6 +65,8 @@ public: void test_gather_short_version() { ParallelTestHelpers::runParallel(run_gather_short_version); } + + void test_all_to_all() { ParallelTestHelpers::runParallel(run_all_to_all); } }; #endif /* MANTID_PARALLEL_COLLECTIVESTEST_H_ */ diff --git a/Framework/Parallel/test/EventDataPartitionerTest.h b/Framework/Parallel/test/EventDataPartitionerTest.h new file mode 100644 index 0000000000000000000000000000000000000000..aa46e856d1077d1bf2294bf1f5e86b07ded317c8 --- /dev/null +++ b/Framework/Parallel/test/EventDataPartitionerTest.h @@ -0,0 +1,134 @@ +#ifndef MANTID_PARALLEL_EVENTDATAPARTITIONERTEST_H_ +#define MANTID_PARALLEL_EVENTDATAPARTITIONERTEST_H_ + +#include <cxxtest/TestSuite.h> + +#include "MantidParallel/IO/EventDataPartitioner.h" + +using namespace Mantid::Parallel::IO; +using Mantid::Types::Core::DateAndTime; +namespace Mantid { +namespace Parallel { +namespace IO { +namespace detail { +bool operator==(const Event<double> &a, const Event<double> &b) { + return a.index == b.index && a.tof == b.tof && a.pulseTime == b.pulseTime; +} +} +} +} +} +using Event = detail::Event<double>; + +class EventDataPartitionerTest : public CxxTest::TestSuite { +public: + // This pair of 
boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static EventDataPartitionerTest *createSuite() { + return new EventDataPartitionerTest(); + } + static void destroySuite(EventDataPartitionerTest *suite) { delete suite; } + + void test_construct() { + TS_ASSERT_THROWS_NOTHING((EventDataPartitioner<int32_t, int64_t, double>( + 7, PulseTimeGenerator<int32_t, int64_t>{}))); + } + + void test_empty_range() { + for (const auto workers : {1, 2, 3}) { + EventDataPartitioner<int32_t, int64_t, double> partitioner( + workers, PulseTimeGenerator<int32_t, int64_t>( + {0, 2, 2, 3}, {2, 4, 6, 8}, "nanosecond", 0)); + size_t count = 0; + std::vector<std::vector<Event>> data; + partitioner.partition(data, nullptr, nullptr, {0, 1, count}); + TS_ASSERT_EQUALS(data.size(), workers); + for (int worker = 0; worker < workers; ++worker) { + TS_ASSERT_EQUALS(data[worker].size(), 0); + } + } + } + + void test_partition_1_worker() { + EventDataPartitioner<int32_t, int64_t, double> partitioner( + 1, PulseTimeGenerator<int32_t, int64_t>({0, 2, 2, 3}, {2, 4, 6, 8}, + "nanosecond", 0)); + std::vector<std::vector<Event>> data; + std::vector<int32_t> index{5, 1, 4}; + std::vector<double> tof{1.1, 2.2, 3.3}; + // Starting at beginning, length 3 + partitioner.partition(data, index.data(), tof.data(), {0, 0, 3}); + TS_ASSERT_EQUALS(data.size(), 1); + TS_ASSERT_EQUALS(data[0].size(), 3); + TS_ASSERT_EQUALS(data[0][0], (Event{5, 1.1, DateAndTime(2)})); + TS_ASSERT_EQUALS(data[0][1], (Event{1, 2.2, DateAndTime(2)})); + TS_ASSERT_EQUALS(data[0][2], (Event{4, 3.3, DateAndTime(6)})); + // Starting at offset 1, length 3 + partitioner.partition(data, index.data(), tof.data(), {0, 1, 3}); + TS_ASSERT_EQUALS(data.size(), 1); + TS_ASSERT_EQUALS(data[0].size(), 3); + TS_ASSERT_EQUALS(data[0][0], (Event{5, 1.1, DateAndTime(2)})); + TS_ASSERT_EQUALS(data[0][1], (Event{1, 2.2, DateAndTime(6)})); + TS_ASSERT_EQUALS(data[0][2], 
(Event{4, 3.3, DateAndTime(8)})); + } + + void test_partition_2_workers() { + EventDataPartitioner<int32_t, int64_t, double> partitioner( + 2, PulseTimeGenerator<int32_t, int64_t>({0, 2, 2, 3}, {2, 4, 6, 8}, + "nanosecond", 0)); + std::vector<std::vector<Event>> data; + std::vector<int32_t> index{5, 1, 4, 1}; + std::vector<double> tof{1.1, 2.2, 3.3, 4.4}; + // Starting at beginning, length 4 + partitioner.partition(data, index.data(), tof.data(), {0, 0, 4}); + TS_ASSERT_EQUALS(data.size(), 2); + // Worker is given by index%workers + TS_ASSERT_EQUALS(data[0].size(), 1); + TS_ASSERT_EQUALS(data[1].size(), 3); + // Index is translated to local index = index/workers + TS_ASSERT_EQUALS(data[1][0], (Event{2, 1.1, DateAndTime(2)})); + TS_ASSERT_EQUALS(data[1][1], (Event{0, 2.2, DateAndTime(2)})); + TS_ASSERT_EQUALS(data[0][0], (Event{2, 3.3, DateAndTime(6)})); + TS_ASSERT_EQUALS(data[1][2], (Event{0, 4.4, DateAndTime(8)})); + // Starting at offset 1, length 4 + partitioner.partition(data, index.data(), tof.data(), {0, 1, 4}); + TS_ASSERT_EQUALS(data.size(), 2); + TS_ASSERT_EQUALS(data[0].size(), 1); + TS_ASSERT_EQUALS(data[1].size(), 3); + TS_ASSERT_EQUALS(data[1][0], (Event{2, 1.1, DateAndTime(2)})); + TS_ASSERT_EQUALS(data[1][1], (Event{0, 2.2, DateAndTime(6)})); + TS_ASSERT_EQUALS(data[0][0], (Event{2, 3.3, DateAndTime(8)})); + TS_ASSERT_EQUALS(data[1][2], (Event{0, 4.4, DateAndTime(8)})); + } + + void test_partition_3_workers() { + EventDataPartitioner<int32_t, int64_t, double> partitioner( + 3, PulseTimeGenerator<int32_t, int64_t>({0, 2, 2, 3}, {2, 4, 6, 8}, + "nanosecond", 0)); + std::vector<std::vector<Event>> data; + std::vector<int32_t> index{5, 1, 4, 1}; + std::vector<double> tof{1.1, 2.2, 3.3, 4.4}; + // Starting at beginning, length 4 + partitioner.partition(data, index.data(), tof.data(), {0, 0, 4}); + TS_ASSERT_EQUALS(data.size(), 3); + TS_ASSERT_EQUALS(data[0].size(), 0); // no index with %3 == 0 + TS_ASSERT_EQUALS(data[1].size(), 3); + 
TS_ASSERT_EQUALS(data[2].size(), 1); + TS_ASSERT_EQUALS(data[2][0], (Event{1, 1.1, DateAndTime(2)})); + TS_ASSERT_EQUALS(data[1][0], (Event{0, 2.2, DateAndTime(2)})); + TS_ASSERT_EQUALS(data[1][1], (Event{1, 3.3, DateAndTime(6)})); + TS_ASSERT_EQUALS(data[1][2], (Event{0, 4.4, DateAndTime(8)})); + // Starting at offset 1, length 4 + partitioner.partition(data, index.data(), tof.data(), {0, 1, 4}); + TS_ASSERT_EQUALS(data.size(), 3); + TS_ASSERT_EQUALS(data[0].size(), 0); // no index with %3 == 0 + TS_ASSERT_EQUALS(data[1].size(), 3); + TS_ASSERT_EQUALS(data[2].size(), 1); + TS_ASSERT_EQUALS(data[2][0], (Event{1, 1.1, DateAndTime(2)})); + TS_ASSERT_EQUALS(data[1][0], (Event{0, 2.2, DateAndTime(6)})); + TS_ASSERT_EQUALS(data[1][1], (Event{1, 3.3, DateAndTime(8)})); + TS_ASSERT_EQUALS(data[1][2], (Event{0, 4.4, DateAndTime(8)})); + } +}; + +#endif /* MANTID_PARALLEL_EVENTDATAPARTITIONERTEST_H_ */ diff --git a/Framework/Parallel/test/EventLoaderTest.h b/Framework/Parallel/test/EventLoaderTest.h new file mode 100644 index 0000000000000000000000000000000000000000..1f7abc954674285dc82a0794e0e18601bdfd7343 --- /dev/null +++ b/Framework/Parallel/test/EventLoaderTest.h @@ -0,0 +1,198 @@ +#ifndef MANTID_PARALLEL_EVENTLOADERTEST_H_ +#define MANTID_PARALLEL_EVENTLOADERTEST_H_ + +#include <cxxtest/TestSuite.h> +#include "MantidTestHelpers/ParallelRunner.h" + +#include "MantidParallel/IO/Chunker.h" +#include "MantidParallel/IO/EventLoader.h" +#include "MantidParallel/IO/EventParser.h" +#include "MantidParallel/IO/NXEventDataSource.h" +#include "MantidTypes/Event/TofEvent.h" + +#include <H5Cpp.h> + +namespace Mantid { +namespace Parallel { +namespace IO { +namespace EventLoader { +template <class T> void load() { throw std::runtime_error("unknown"); } +template <> void load<int32_t>() { throw std::runtime_error("int32_t"); } +template <> void load<int64_t>() { throw std::runtime_error("int64_t"); } +template <> void load<uint32_t>() { throw std::runtime_error("uint32_t"); } 
+template <> void load<uint64_t>() { throw std::runtime_error("uint64_t"); } +template <> void load<float>() { throw std::runtime_error("float"); } +template <> void load<double>() { throw std::runtime_error("double"); } +} +} +} +} + +#include "MantidParallel/IO/EventLoaderHelpers.h" + +using namespace Mantid; +using namespace Parallel; +using namespace Parallel::IO; + +namespace { + +class FakeDataSource : public NXEventDataSource<int32_t> { +public: + FakeDataSource(const int numWorkers) : m_numWorkers(numWorkers) {} + std::unique_ptr<AbstractEventDataPartitioner<int32_t>> + setBankIndex(const size_t bank) override { + m_bank = bank; + auto index = std::vector<int64_t>{0, 100, 100, + 300 * static_cast<int64_t>(m_bank + 1), + 500 * static_cast<int64_t>(m_bank + 1), + 700 * static_cast<int64_t>(m_bank + 1)}; + std::vector<double> time_zero; + for (size_t i = 0; i < index.size(); ++i) + time_zero.push_back(static_cast<double>(10 * i + bank)); + + // Drift depending on bank to ensure correct offset is used for every bank. + int64_t time_zero_offset = 123456789 + 1000000 * m_bank; + + return Kernel::make_unique<EventDataPartitioner<int64_t, double, int32_t>>( + m_numWorkers, PulseTimeGenerator<int64_t, double>{ + index, time_zero, "second", time_zero_offset}); + } + + void readEventID(int32_t *event_id, size_t start, + size_t count) const override { + // Factor 13 such that there is a gap in the detector IDs between banks. + for (size_t i = 0; i < count; ++i) + event_id[i] = static_cast<int32_t>(m_bank * 13 * m_pixelsPerBank + + (start + i) % m_pixelsPerBank); + } + + void readEventTimeOffset(int32_t *event_time_offset, size_t start, + size_t count) const override { + for (size_t i = 0; i < count; ++i) + event_time_offset[i] = static_cast<int32_t>(17 * m_bank + start + i); + } + + std::string readEventTimeOffsetUnit() const override { + // Using nanosecond implies that EventLoader must convert to microsecond, + allowing us to see and test the conversion in action.
+ return "nanosecond"; + } + +private: + const int m_numWorkers; + const size_t m_pixelsPerBank{77}; + size_t m_bank{0}; +}; + +void do_test_load(const Parallel::Communicator &comm, const size_t chunkSize) { + const std::vector<size_t> bankSizes{111, 1111, 11111}; + Chunker chunker(comm.size(), comm.rank(), bankSizes, chunkSize); + // FakeDataSource encodes information on bank and position in file into TOF + // and pulse times, such that we can verify correct mapping. + FakeDataSource dataSource(comm.size()); + const std::vector<int32_t> bankOffsets{0, 12 * 77, 24 * 77}; + std::vector<std::vector<Types::Event::TofEvent>> eventLists( + (3 * 77 + comm.size() - 1 - comm.rank()) / comm.size()); + std::vector<std::vector<Types::Event::TofEvent> *> eventListPtrs; + for (auto &eventList : eventLists) + eventListPtrs.emplace_back(&eventList); + + EventParser<int32_t> dataSink(comm, chunker.makeWorkerGroups(), bankOffsets, + eventListPtrs); + TS_ASSERT_THROWS_NOTHING( + (EventLoader::load<int32_t>(chunker, dataSource, dataSink))); + + for (size_t localSpectrumIndex = 0; localSpectrumIndex < eventLists.size(); + ++localSpectrumIndex) { + size_t globalSpectrumIndex = comm.size() * localSpectrumIndex + comm.rank(); + size_t bank = globalSpectrumIndex / 77; + size_t pixelInBank = globalSpectrumIndex % 77; + TS_ASSERT_EQUALS(eventLists[localSpectrumIndex].size(), + (bankSizes[bank] + 77 - 1 - pixelInBank) / 77); + int64_t previousPulseTime{0}; + for (size_t event = 0; event < eventLists[localSpectrumIndex].size(); + ++event) { + // Every 77th event in the input is in this list so our TOF should jump + // over 77 TOFs in the input. 
+ double microseconds = + static_cast<double>(17 * bank + 77 * event + pixelInBank) * 1e-3; + TS_ASSERT_EQUALS(eventLists[localSpectrumIndex][event].tof(), + microseconds); + size_t index = event * 77 + pixelInBank; + size_t pulse = 0; + if (index >= 100) + pulse = 2; + if (index >= 300 * static_cast<size_t>(bank + 1)) + pulse = 3; + if (index >= 500 * static_cast<size_t>(bank + 1)) + pulse = 4; + if (index >= 700 * static_cast<size_t>(bank + 1)) + pulse = 5; + // Testing different aspects that affect pulse time: + // - `123456789 + 1000000 * bank` confirms that the event_time_zero + // offset attribute is taken into account, and for correct bank. + // - `10 * pulse + bank` confirms that correct event_index is used and + // event_time_offset is used correctly, and for correct bank. + // - The factor 1000000000 converts event_time_offset from input unit + // seconds to nanoseconds, confirming that the input unit is adhered to. + const auto pulseTime = + eventLists[localSpectrumIndex][event].pulseTime().totalNanoseconds(); + TS_ASSERT_EQUALS(pulseTime, 123456789 + 1000000 * bank + + (10 * pulse + bank) * 1000000000); + TS_ASSERT(pulseTime >= previousPulseTime); + previousPulseTime = pulseTime; + } + } +} +} + +class EventLoaderTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static EventLoaderTest *createSuite() { return new EventLoaderTest(); } + static void destroySuite(EventLoaderTest *suite) { delete suite; } + + void test_throws_if_file_does_not_exist() { + TS_ASSERT_THROWS( + EventLoader::load(Communicator{}, "abcdefg", "", {}, {}, {}), + H5::FileIException); + } + + void test_H5DataType_parameter_pack_conversion() { + using EventLoader::load; + TS_ASSERT_THROWS_EQUALS(load(H5::PredType::NATIVE_INT32), + const std::runtime_error &e, std::string(e.what()), + "int32_t"); +
TS_ASSERT_THROWS_EQUALS(load(H5::PredType::NATIVE_INT64), + const std::runtime_error &e, std::string(e.what()), + "int64_t"); + TS_ASSERT_THROWS_EQUALS(load(H5::PredType::NATIVE_UINT32), + const std::runtime_error &e, std::string(e.what()), + "uint32_t"); + TS_ASSERT_THROWS_EQUALS(load(H5::PredType::NATIVE_UINT64), + const std::runtime_error &e, std::string(e.what()), + "uint64_t"); + TS_ASSERT_THROWS_EQUALS(load(H5::PredType::NATIVE_FLOAT), + const std::runtime_error &e, std::string(e.what()), + "float"); + TS_ASSERT_THROWS_EQUALS(load(H5::PredType::NATIVE_DOUBLE), + const std::runtime_error &e, std::string(e.what()), + "double"); + TS_ASSERT_THROWS_EQUALS( + load(H5::PredType::NATIVE_CHAR), const std::runtime_error &e, + std::string(e.what()), + "Unsupported H5::DataType for event_time_offset in NXevent_data"); + } + + void test_load() { + for (const size_t chunkSize : {37, 123, 1111}) { + for (const auto threads : {1, 2, 3, 5, 7, 13}) { + ParallelTestHelpers::ParallelRunner runner(threads); + runner.run(do_test_load, chunkSize); + } + } + } +}; + +#endif /* MANTID_PARALLEL_EVENTLOADERTEST_H_ */ diff --git a/Framework/Parallel/test/EventParserTest.h b/Framework/Parallel/test/EventParserTest.h new file mode 100644 index 0000000000000000000000000000000000000000..0dce40044bdbb7e1165d6dc4fc4fdc58fb0154d8 --- /dev/null +++ b/Framework/Parallel/test/EventParserTest.h @@ -0,0 +1,453 @@ +#ifndef MANTID_PARALLEL_COLLECTIVESTEST_H_ +#define MANTID_PARALLEL_COLLECTIVESTEST_H_ + +#include <cxxtest/TestSuite.h> + +#include "MantidParallel/IO/EventParser.h" +#include <boost/make_shared.hpp> +#include <numeric> + +using namespace Mantid; +using namespace Parallel::IO; +using Mantid::Types::Core::DateAndTime; +using Mantid::Types::Event::TofEvent; + +namespace anonymous { +template <typename IndexType, typename TimeZeroType, typename TimeOffsetType> +class FakeParserDataGenerator { +public: + FakeParserDataGenerator(size_t numBanks, size_t pixelsPerBank, + size_t numPulses, 
size_t maxEventsPerPixel = 100) { + generateTestData(numBanks, pixelsPerBank, numPulses, maxEventsPerPixel); + } + + const std::vector<int32_t> &bankOffsets() const { return m_bank_offsets; } + + const std::vector<IndexType> &eventIndex(size_t bank) const { + return m_event_indices[bank]; + } + + const std::vector<TimeZeroType> &eventTimeZero() const { + return m_event_time_zero; + } + + const std::vector<TimeOffsetType> &eventTimeOffset(size_t bank) const { + return m_event_time_offsets[bank]; + } + + const std::vector<int32_t> &eventId(size_t bank) const { + return m_event_ids[bank]; + } + + Chunker::LoadRange generateBasicRange(size_t bank) { + Chunker::LoadRange range; + range.eventOffset = 0; + range.eventCount = m_event_ids[bank].size(); + range.bankIndex = bank; + + return range; + } + + boost::shared_ptr<EventParser<TimeOffsetType>> generateTestParser() { + test_event_lists.clear(); + test_event_lists.resize(m_referenceEventLists.size()); + std::vector<std::vector<TofEvent> *> eventLists; + for (auto &eventList : test_event_lists) + eventLists.emplace_back(&eventList); + Parallel::Communicator comm; + return boost::make_shared<EventParser<TimeOffsetType>>( + comm, std::vector<std::vector<int>>{}, m_bank_offsets, eventLists); + } + + void checkEventLists() const { + for (size_t i = 0; i < m_referenceEventLists.size(); ++i) + TS_ASSERT_EQUALS(m_referenceEventLists[i], test_event_lists[i]); + } + +private: + void generateTestData(const size_t numBanks, const size_t pixelsPerBank, + const size_t numPulses, + const size_t maxEventsPerPixel) { + initOffsetsAndIndices(numBanks, numPulses); + m_event_time_zero.resize(numPulses); + auto numPixels = numBanks * pixelsPerBank; + + m_event_ids.resize(numBanks); + m_event_time_offsets.resize(numBanks); + m_referenceEventLists.clear(); + m_referenceEventLists.resize(numPixels); + + for (size_t bank = 0; bank < numBanks; ++bank) { + size_t bankEventSize = 0; + for (size_t pulse = 0; pulse < numPulses; ++pulse) { + 
m_event_indices[bank][pulse] = static_cast<int32_t>(bankEventSize); + m_event_time_zero[pulse] = static_cast<TimeZeroType>(pulse * 100000); + for (size_t pixel = 0; pixel < pixelsPerBank; ++pixel) { + size_t absolutePixel = pixel + bank * pixelsPerBank; + auto eventSize = getRandEventSize(1, maxEventsPerPixel / numPulses); + bankEventSize += eventSize; + auto &list = m_referenceEventLists[absolutePixel]; + auto prev_end = list.size(); + std::generate_n(std::back_inserter(list), eventSize, [this, pulse]() { + return TofEvent(getRandomTimeOffset(100000), + m_event_time_zero[pulse]); + }); + std::fill_n( + std::back_inserter(m_event_ids[bank]), eventSize, + static_cast<IndexType>(m_bank_offsets[bank] + absolutePixel)); + std::transform(list.cbegin() + prev_end, list.cend(), + std::back_inserter(m_event_time_offsets[bank]), + [this](const TofEvent &event) { + return static_cast<TimeOffsetType>(event.tof()); + }); + } + } + } + } + + void initOffsetsAndIndices(const size_t numBanks, const size_t numPulses) { + m_bank_offsets.resize(numBanks); + m_event_indices.resize(numBanks); + for (size_t bank = 0; bank < numBanks; ++bank) { + m_bank_offsets[bank] = static_cast<int32_t>(bank * 1000) + 1000; + m_event_indices[bank].resize(numPulses); + } + } + + size_t getRandEventSize(size_t min = 1, size_t max = 1000) { + return static_cast<size_t>(rand()) % (max - min) + min; + } + + double getRandomTimeOffset(size_t pulseWidth) { + return static_cast<double>(rand() % pulseWidth); + } + + std::vector<int32_t> m_bank_offsets; + std::vector<std::vector<int32_t>> m_event_ids; + std::vector<std::vector<TimeOffsetType>> m_event_time_offsets; + std::vector<std::vector<IndexType>> m_event_indices; + std::vector<TimeZeroType> m_event_time_zero; + std::vector<std::vector<TofEvent>> m_referenceEventLists; + std::vector<std::vector<TofEvent>> test_event_lists; +}; +} + +class EventParserTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being 
created statically + // This means the constructor isn't called when running other tests + static EventParserTest *createSuite() { return new EventParserTest(); } + static void destroySuite(EventParserTest *suite) { delete suite; } + + void testConstruct() { + std::vector<std::vector<int>> rankGroups; + std::vector<int32_t> bankOffsets{1, 2, 3, 4}; + std::vector<std::vector<TofEvent> *> eventLists(4); + + Parallel::Communicator comm; + TS_ASSERT_THROWS_NOTHING( + (EventParser<double>(comm, rankGroups, bankOffsets, eventLists))); + } + + void testConvertEventIDToGlobalSpectrumIndex() { + std::vector<int32_t> bankOffsets{1000}; + std::vector<int32_t> eventId{1001, 1002, 1004, 1004}; + auto eventIdCopy = eventId; + detail::eventIdToGlobalSpectrumIndex(eventId.data(), eventId.size(), + bankOffsets[0]); + + TS_ASSERT_EQUALS(eventId[0], eventIdCopy[0] - bankOffsets[0]); + TS_ASSERT_EQUALS(eventId[1], eventIdCopy[1] - bankOffsets[0]); + TS_ASSERT_EQUALS(eventId[2], eventIdCopy[2] - bankOffsets[0]); + TS_ASSERT_EQUALS(eventId[3], eventIdCopy[3] - bankOffsets[0]); + } + + void testExtractEventsFull() { + anonymous::FakeParserDataGenerator<int32_t, int64_t, double> gen(1, 10, 5); + auto event_id = gen.eventId(0); + auto event_time_offset = gen.eventTimeOffset(0); + auto range = gen.generateBasicRange(0); + + detail::eventIdToGlobalSpectrumIndex(event_id.data() + range.eventOffset, + range.eventCount, 1000); + std::vector<std::vector<EventParser<double>::Event>> rankData; + // event_id now contains spectrum indices + EventDataPartitioner<int32_t, int64_t, double> partitioner( + 1, {gen.eventIndex(0), gen.eventTimeZero(), "nanosecond", 0}); + partitioner.partition(rankData, event_id.data(), + event_time_offset.data() + range.eventOffset, range); + + TS_ASSERT(std::equal(rankData[0].cbegin(), rankData[0].cend(), + event_time_offset.cbegin(), + [](const EventParser<double>::Event &e, + const double tof) { return tof == e.tof; })); + doTestRankData(rankData, gen, range); + } + + 
void testExtractEventsPartial() { + anonymous::FakeParserDataGenerator<int32_t, int64_t, double> gen(1, 10, 5); + auto event_id = gen.eventId(0); + auto event_time_offset = gen.eventTimeOffset(0); + auto range = Chunker::LoadRange{0, 5, 100}; + + detail::eventIdToGlobalSpectrumIndex(event_id.data() + range.eventOffset, + range.eventCount, 1000); + std::vector<std::vector<EventParser<double>::Event>> rankData; + // event_id now contains spectrum indices + EventDataPartitioner<int32_t, int64_t, double> partitioner( + 1, {gen.eventIndex(0), gen.eventTimeZero(), "nanosecond", 0}); + partitioner.partition(rankData, event_id.data(), + event_time_offset.data() + range.eventOffset, range); + + TS_ASSERT( + std::equal(rankData[0].cbegin(), rankData[0].cend(), + event_time_offset.cbegin() + range.eventOffset, + [](const EventParser<double>::Event &e, const double tof) { + return static_cast<double>(tof) == e.tof; + })); + doTestRankData(rankData, gen, range); + } + + void testParsingFull_1Pulse_1Bank() { + anonymous::FakeParserDataGenerator<int32_t, int32_t, double> gen(1, 10, 1); + auto parser = gen.generateTestParser(); + parser->setEventDataPartitioner( + Kernel::make_unique<EventDataPartitioner<int32_t, int32_t, double>>( + 1, PulseTimeGenerator<int32_t, int32_t>{ + gen.eventIndex(0), gen.eventTimeZero(), "nanosecond", 0})); + parser->setEventTimeOffsetUnit("microsecond"); + auto event_id = gen.eventId(0); + auto event_time_offset = gen.eventTimeOffset(0); + + parser->startAsync(event_id.data(), event_time_offset.data(), + gen.generateBasicRange(0)); + + parser->wait(); + gen.checkEventLists(); + } + + void testParsingFull_1Rank_1Bank() { + anonymous::FakeParserDataGenerator<int32_t, int64_t, float> gen(1, 10, 2); + auto parser = gen.generateTestParser(); + parser->setEventDataPartitioner( + Kernel::make_unique<EventDataPartitioner<int32_t, int64_t, float>>( + 1, PulseTimeGenerator<int32_t, int64_t>{ + gen.eventIndex(0), gen.eventTimeZero(), "nanosecond", 0})); + 
parser->setEventTimeOffsetUnit("microsecond"); + auto event_id = gen.eventId(0); + auto event_time_offset = gen.eventTimeOffset(0); + + parser->startAsync(event_id.data(), event_time_offset.data(), + gen.generateBasicRange(0)); + + parser->wait(); + gen.checkEventLists(); + } + + void testParsingFull_1Rank_2Banks() { + int numBanks = 2; + anonymous::FakeParserDataGenerator<int32_t, int64_t, double> gen(numBanks, + 10, 7); + auto parser = gen.generateTestParser(); + + for (int i = 0; i < numBanks; i++) { + parser->setEventDataPartitioner( + Kernel::make_unique<EventDataPartitioner<int32_t, int64_t, double>>( + 1, PulseTimeGenerator<int32_t, int64_t>{ + gen.eventIndex(i), gen.eventTimeZero(), "nanosecond", 0})); + parser->setEventTimeOffsetUnit("microsecond"); + auto event_id = gen.eventId(i); + auto event_time_offset = gen.eventTimeOffset(i); + + parser->startAsync(event_id.data(), event_time_offset.data(), + gen.generateBasicRange(i)); + parser->wait(); + } + gen.checkEventLists(); + } + + void testParsingFull_InParts_1Rank_1Bank() { + anonymous::FakeParserDataGenerator<int32_t, int64_t, double> gen(1, 11, 7); + auto parser = gen.generateTestParser(); + parser->setEventDataPartitioner( + Kernel::make_unique<EventDataPartitioner<int32_t, int64_t, double>>( + 1, PulseTimeGenerator<int32_t, int64_t>{ + gen.eventIndex(0), gen.eventTimeZero(), "nanosecond", 0})); + parser->setEventTimeOffsetUnit("microsecond"); + auto event_id = gen.eventId(0); + auto event_time_offset = gen.eventTimeOffset(0); + + auto parts = 5; + auto portion = event_id.size() / parts; + + for (int i = 0; i < parts; ++i) { + auto offset = portion * i; + + // Needed so that no data is missed. 
+ if (i == (parts - 1)) + portion = event_id.size() - offset; + + Chunker::LoadRange range{0, offset, portion}; + parser->startAsync(event_id.data() + offset, + event_time_offset.data() + offset, range); + parser->wait(); + } + gen.checkEventLists(); + } + + void testParsingFull_InParts_1Rank_3Banks() { + size_t numBanks = 3; + anonymous::FakeParserDataGenerator<int32_t, int64_t, double> gen(3, 20, 7); + auto parser = gen.generateTestParser(); + + for (size_t bank = 0; bank < numBanks; bank++) { + parser->setEventDataPartitioner( + Kernel::make_unique<EventDataPartitioner<int32_t, int64_t, double>>( + 1, PulseTimeGenerator<int32_t, int64_t>{gen.eventIndex(bank), + gen.eventTimeZero(), + "nanosecond", 0})); + parser->setEventTimeOffsetUnit("microsecond"); + auto event_id = gen.eventId(bank); + auto event_time_offset = gen.eventTimeOffset(bank); + + auto parts = 11; + auto portion = event_id.size() / parts; + + for (int i = 0; i < parts; ++i) { + auto offset = portion * i; + + // Needed so that no data is missed. 
+ if (i == (parts - 1)) + portion = event_id.size() - offset; + + Chunker::LoadRange range{bank, offset, portion}; + parser->startAsync(event_id.data() + offset, + event_time_offset.data() + offset, range); + parser->wait(); + } + } + gen.checkEventLists(); + } + + void test_setEventTimeOffsetUnit() { + std::vector<std::vector<int>> rankGroups; + std::vector<int32_t> bankOffsets{0}; + std::vector<TofEvent> eventList; + std::vector<std::vector<TofEvent> *> eventLists{&eventList}; + Parallel::Communicator comm; + EventParser<double> parser(comm, rankGroups, bankOffsets, eventLists); + PulseTimeGenerator<int32_t, int32_t> pulseTimes({0}, {0}, "nanosecond", 0); + + parser.setEventDataPartitioner( + Kernel::make_unique<EventDataPartitioner<int32_t, int32_t, double>>( + 1, std::move(pulseTimes))); + + int32_t event_id{0}; + const double event_time_offset{1.5}; + const Chunker::LoadRange range{0, 0, 1}; + + parser.startAsync(&event_id, &event_time_offset, range); + parser.wait(); + TS_ASSERT_EQUALS(eventList.size(), 1); + TS_ASSERT_EQUALS(eventList[0].tof(), 0.0); + + parser.setEventTimeOffsetUnit("second"); + parser.startAsync(&event_id, &event_time_offset, range); + parser.wait(); + TS_ASSERT_EQUALS(eventList.size(), 2); + TS_ASSERT_EQUALS(eventList[1].tof(), 1.5e6); + + parser.setEventTimeOffsetUnit("microsecond"); + parser.startAsync(&event_id, &event_time_offset, range); + parser.wait(); + TS_ASSERT_EQUALS(eventList.size(), 3); + TS_ASSERT_EQUALS(eventList[2].tof(), 1.5); + + parser.setEventTimeOffsetUnit("nanosecond"); + parser.startAsync(&event_id, &event_time_offset, range); + parser.wait(); + TS_ASSERT_EQUALS(eventList.size(), 4); + TS_ASSERT_EQUALS(eventList[3].tof(), 1.5e-3); + + TS_ASSERT_THROWS_EQUALS( + parser.setEventTimeOffsetUnit("millisecond"), + const std::runtime_error &e, std::string(e.what()), + "EventParser: unsupported unit `millisecond` for event_time_offset"); + } + +private: + template <typename T, typename IndexType, typename TimeZeroType, + 
typename TimeOffsetType> + void + doTestRankData(const T &rankData, + anonymous::FakeParserDataGenerator<IndexType, TimeZeroType, + TimeOffsetType> &gen, + const Chunker::LoadRange &range) { + PulseTimeGenerator<IndexType, TimeZeroType> pulseTimes( + gen.eventIndex(0), gen.eventTimeZero(), "nanosecond", 0); + pulseTimes.seek(range.eventOffset); + TS_ASSERT_EQUALS(rankData[0].size(), range.eventCount); + for (const auto &item : rankData[0]) { + TS_ASSERT_EQUALS(item.pulseTime, pulseTimes.next()); + } + } +}; + +class EventParserTestPerformance : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static EventParserTestPerformance *createSuite() { + return new EventParserTestPerformance(); + } + static void destroySuite(EventParserTestPerformance *suite) { delete suite; } + + EventParserTestPerformance() : gen(NUM_BANKS, 1000, 7, 100) { + event_ids.resize(NUM_BANKS); + event_time_offsets.resize(NUM_BANKS); + // Copy here so this does not have to happen + // in performance test + for (size_t i = 0; i < NUM_BANKS; ++i) { + event_time_offsets[i] = gen.eventTimeOffset(i); + event_ids[i] = gen.eventId(i); + } + + parser = gen.generateTestParser(); + for (auto &eventList : m_eventLists) + m_eventListPtrs.emplace_back(&eventList); + } + + void testCompletePerformance() { + for (size_t i = 0; i < NUM_BANKS; ++i) { + parser->setEventDataPartitioner( + Kernel::make_unique<EventDataPartitioner<int32_t, int64_t, double>>( + 1, PulseTimeGenerator<int32_t, int64_t>{ + gen.eventIndex(i), gen.eventTimeZero(), "nanosecond", 0})); + parser->setEventTimeOffsetUnit("microsecond"); + parser->startAsync(event_ids[i].data(), event_time_offsets[i].data(), + gen.generateBasicRange(i)); + parser->wait(); + } + } + + void testExtractEventsPerformance() { + for (size_t bank = 0; bank < NUM_BANKS; bank++) { + EventDataPartitioner<int32_t, int64_t, double> 
partitioner( + 1, {gen.eventIndex(bank), gen.eventTimeZero(), "nanosecond", 0}); + partitioner.partition(rankData, event_ids[bank].data(), + event_time_offsets[bank].data(), + gen.generateBasicRange(bank)); + } + } + +private: + const size_t NUM_BANKS = 7; + std::vector<std::vector<int32_t>> event_ids; + std::vector<std::vector<double>> event_time_offsets; + anonymous::FakeParserDataGenerator<int32_t, int64_t, double> gen; + boost::shared_ptr<EventParser<double>> parser; + std::vector<std::vector<EventParser<double>::Event>> rankData; + std::vector<std::vector<TofEvent>> m_eventLists{NUM_BANKS * 1000}; + std::vector<std::vector<TofEvent> *> m_eventListPtrs; +}; +#endif /* MANTID_PARALLEL_COLLECTIVESTEST_H_ */ diff --git a/Framework/Parallel/test/NonblockingTest.h b/Framework/Parallel/test/NonblockingTest.h new file mode 100644 index 0000000000000000000000000000000000000000..bca1e571fdbb2e36fcb93a6bcabdf25817d71fad --- /dev/null +++ b/Framework/Parallel/test/NonblockingTest.h @@ -0,0 +1,41 @@ +#ifndef MANTID_PARALLEL_NONBLOCKINGTEST_H_ +#define MANTID_PARALLEL_NONBLOCKINGTEST_H_ + +#include <cxxtest/TestSuite.h> + +#include "MantidParallel/Communicator.h" +#include "MantidParallel/Nonblocking.h" +#include "MantidTestHelpers/ParallelRunner.h" + +using namespace Mantid; +using namespace Parallel; + +namespace { +void run_wait_all(const Communicator &comm) { + int64_t data = 123456789 + comm.rank(); + int dest = (comm.rank() + 1) % comm.size(); + int src = (comm.rank() + comm.size() - 1) % comm.size(); + int tag1 = 123; + int tag2 = 124; + int64_t result; + + std::vector<Request> requests; + requests.emplace_back(comm.irecv(src, tag1, result)); + requests.emplace_back(comm.irecv(src, tag2, result)); + comm.send(dest, tag1, data); + comm.send(dest, tag2, data); + TS_ASSERT_THROWS_NOTHING(wait_all(requests.begin(), requests.end())); +} +} + +class NonblockingTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created 
statically + // This means the constructor isn't called when running other tests + static NonblockingTest *createSuite() { return new NonblockingTest(); } + static void destroySuite(NonblockingTest *suite) { delete suite; } + + void test_wait_all() { ParallelTestHelpers::runParallel(run_wait_all); } +}; + +#endif /* MANTID_PARALLEL_NONBLOCKINGTEST_H_ */ diff --git a/Framework/Parallel/test/PulseTimeGeneratorTest.h b/Framework/Parallel/test/PulseTimeGeneratorTest.h new file mode 100644 index 0000000000000000000000000000000000000000..c26e978eb9d749e03315b93b246ad738fec079e4 --- /dev/null +++ b/Framework/Parallel/test/PulseTimeGeneratorTest.h @@ -0,0 +1,178 @@ +#ifndef MANTID_PARALLEL_PULSETIMEGENERATORTEST_H_ +#define MANTID_PARALLEL_PULSETIMEGENERATORTEST_H_ + +#include <cxxtest/TestSuite.h> + +#include <type_traits> + +#include "MantidParallel/IO/PulseTimeGenerator.h" + +using PulseTimeGenerator = + Mantid::Parallel::IO::PulseTimeGenerator<int32_t, int32_t>; +using Mantid::Parallel::IO::detail::scaleFromUnit; +using Mantid::Parallel::IO::detail::IntOrFloat64Bit; + +class PulseTimeGeneratorTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static PulseTimeGeneratorTest *createSuite() { + return new PulseTimeGeneratorTest(); + } + static void destroySuite(PulseTimeGeneratorTest *suite) { delete suite; } + + void test_scaleFromUnit_integer_converted_to_nanoseconds() { + // DateAndTime expects int64_t to be in nanoseconds so if unit does not + // match there must be an appropriate conversion factor. + TS_ASSERT_EQUALS(scaleFromUnit<int32_t>("nanosecond"), 1); + TS_ASSERT_EQUALS(scaleFromUnit<uint32_t>("nanosecond"), 1); + TS_ASSERT_EQUALS(scaleFromUnit<int64_t>("nanosecond"), 1); + TS_ASSERT_EQUALS(scaleFromUnit<uint64_t>("nanosecond"), 1); + // Would supporting anything by `second` make sense for integers? 
+ TS_ASSERT_THROWS_EQUALS( + scaleFromUnit<int64_t>("second"), const std::runtime_error &e, + std::string(e.what()), + "PulseTimeGenerator: unsupported unit `second` for event_time_zero"); + } + + void test_scaleFromUnit_float_converted_to_microseconds() { + // DateAndTime expects double to be in seconds so if unit does not match + // there must be an appropriate conversion factor. + TS_ASSERT_EQUALS(scaleFromUnit<float>("second"), 1.0); + TS_ASSERT_EQUALS(scaleFromUnit<double>("second"), 1.0); + TS_ASSERT_EQUALS(scaleFromUnit<float>("microsecond"), 1e-6); + TS_ASSERT_EQUALS(scaleFromUnit<double>("microsecond"), 1e-6); + TS_ASSERT_EQUALS(scaleFromUnit<float>("nanosecond"), 1e-9); + TS_ASSERT_EQUALS(scaleFromUnit<double>("nanosecond"), 1e-9); + // Currently not supported (but in principle we could). + TS_ASSERT_THROWS_EQUALS(scaleFromUnit<float>("millisecond"), + const std::runtime_error &e, std::string(e.what()), + "PulseTimeGenerator: unsupported unit " + "`millisecond` for event_time_zero"); + } + + void test_scaleFromUnit_does_not_lose_precision() { + // Return type should be double, even if input is float + TS_ASSERT_DIFFERS(scaleFromUnit<float>("nanosecond"), + static_cast<float>(1e-9)); + } + + void test_IntOrFloat64Bit() { + TS_ASSERT((std::is_same<IntOrFloat64Bit<int32_t>::type, int64_t>::value)); + TS_ASSERT((std::is_same<IntOrFloat64Bit<uint32_t>::type, int64_t>::value)); + TS_ASSERT((std::is_same<IntOrFloat64Bit<int64_t>::type, int64_t>::value)); + TS_ASSERT((std::is_same<IntOrFloat64Bit<uint64_t>::type, int64_t>::value)); + TS_ASSERT((std::is_same<IntOrFloat64Bit<float>::type, double>::value)); + TS_ASSERT((std::is_same<IntOrFloat64Bit<double>::type, double>::value)); + } + + void test_empty() { + PulseTimeGenerator pulseTimes({}, {}, "nanosecond", 1000); + TS_ASSERT_THROWS_EQUALS(pulseTimes.seek(0), const std::runtime_error &e, + std::string(e.what()), + "Empty event index in PulseTimeGenerator"); + } + + void test_no_seek() { + PulseTimeGenerator 
pulseTimes({0}, {17}, "nanosecond", 1000); + // seek() must always called before the first next() call + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 0); + } + + void test_size_1() { + PulseTimeGenerator pulseTimes({0}, {17}, "nanosecond", 1000); + pulseTimes.seek(0); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1017); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1017); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1017); + } + + void test_size_2() { + PulseTimeGenerator pulseTimes({0, 2}, {4, 8}, "nanosecond", 1000); + pulseTimes.seek(0); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1004); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1004); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1008); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1008); + } + + void test_empty_pulse_at_start() { + PulseTimeGenerator pulseTimes({0, 0, 2}, {4, 8, 12}, "nanosecond", 1000); + pulseTimes.seek(0); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1008); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1008); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1012); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1012); + } + + void test_empty_pulse() { + PulseTimeGenerator pulseTimes({0, 2, 2, 3}, {4, 8, 12, 16}, "nanosecond", + 1000); + pulseTimes.seek(0); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1004); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1004); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1012); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1016); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1016); + } + + void test_empty_pulse_at_end() { + PulseTimeGenerator pulseTimes({0, 2, 2}, {4, 8, 12}, "nanosecond", 1000); + pulseTimes.seek(0); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1004); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1004); + 
TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1012); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1012); + } + + void test_seek_to_pulse() { + PulseTimeGenerator pulseTimes({0, 2}, {4, 8}, "nanosecond", 1000); + pulseTimes.seek(2); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1008); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1008); + } + + void test_seek_into_pulse() { + PulseTimeGenerator pulseTimes({0, 2}, {4, 8}, "nanosecond", 1000); + pulseTimes.seek(1); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1004); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1008); + } + + void test_seek_with_empty_pulse() { + PulseTimeGenerator pulseTimes({0, 2, 2, 3}, {4, 8, 12, 16}, "nanosecond", + 1000); + pulseTimes.seek(2); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1012); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1016); + } + + void test_seek_multiple_times() { + PulseTimeGenerator pulseTimes({0, 2, 2, 3}, {4, 8, 12, 16}, "nanosecond", + 1000); + pulseTimes.seek(1); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1004); + pulseTimes.seek(3); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1016); + } + + void test_seek_backwards() { + PulseTimeGenerator pulseTimes({0, 2, 2, 3}, {4, 8, 12, 16}, "nanosecond", + 1000); + pulseTimes.seek(1); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1004); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1012); + pulseTimes.seek(1); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1004); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 1012); + } + + void test_event_time_zero_type_conversion() { + Mantid::Parallel::IO::PulseTimeGenerator<int32_t, float> pulseTimes( + {0}, {1.5}, "microsecond", 10000); + pulseTimes.seek(0); + TS_ASSERT_EQUALS(pulseTimes.next().totalNanoseconds(), 11500); + } +}; + +#endif /* MANTID_PARALLEL_PULSETIMEGENERATORTEST_H_ */ diff --git 
a/Framework/PythonInterface/plugins/algorithms/BASISReduction.py b/Framework/PythonInterface/plugins/algorithms/BASISReduction.py index 35b7c4ee2b4a1d44666c65ae3f44ab6011c057d2..16a9ffa65b9cc6261b9a187c038ff994a532df76 100644 --- a/Framework/PythonInterface/plugins/algorithms/BASISReduction.py +++ b/Framework/PythonInterface/plugins/algorithms/BASISReduction.py @@ -104,12 +104,17 @@ class BASISReduction(PythonAlgorithm): self.declareProperty("DoIndividual", False, "Do each run individually") self.declareProperty("NoMonitorNorm", False, "Stop monitor normalization") + self.declareProperty('ExcludeTimeSegment', '', + 'Exclude a contiguous time segment; '+ + 'Examples: "71546:0-60" filter run 71546 from '+ + 'start to 60 seconds, "71546:300-600", '+ + '"71546:120-end" from 120s to the end of the run') grouping_type = ["None", "Low-Resolution", "By-Tube"] self.declareProperty("GroupDetectors", "None", StringListValidator(grouping_type), "Switch for grouping detectors") - self.declareProperty("NormalizeToFirst", False, "Normalize spectra \ - to intensity of spectrum with lowest Q?") + self.declareProperty('NormalizeToFirst', False, 'Normalize spectra '+ + 'to intensity of spectrum with lowest Q?') # Properties affected by the reflection selected titleReflection = "Reflection Selector" @@ -353,6 +358,10 @@ class BASISReduction(PythonAlgorithm): sapi.DeleteWorkspace(self._normWs) # Delete vanadium S(Q) if self._normalizationType == "by Q slice": sapi.DeleteWorkspace(normWs) # Delete vanadium events file + if self.getProperty("ExcludeTimeSegment").value: + sapi.DeleteWorkspace('splitted_unfiltered') + sapi.DeleteWorkspace('splitter') + sapi.DeleteWorkspace('TOFCorrectWS') def _getRuns(self, rlist, doIndiv=True): """ @@ -411,12 +420,16 @@ class BASISReduction(PythonAlgorithm): kwargs = {"BankName": "bank2"} # 311 analyzers only in bank2 else: kwargs = {} + sapi.LoadEventNexus(Filename=run_file, OutputWorkspace=ws_name, **kwargs) + if str(run)+':' in
self.getProperty("ExcludeTimeSegment").value: + self._filterEvents(str(run), ws_name) if not self._noMonNorm: sapi.LoadNexusMonitors(Filename=run_file, OutputWorkspace=mon_ws_name) + if sam_ws != ws_name: sapi.Plus(LHSWorkspace=sam_ws, RHSWorkspace=ws_name, @@ -560,6 +573,53 @@ class BASISReduction(PythonAlgorithm): Factor=1./maximumYvalue, Operation="Multiply") + def generateSplitterWorkspace(self, fragment): + r"""Create a table workspace with time intervals to keep + + Parameters + ---------- + fragment: str + a-b start and end of time fragment to filter out + """ + inf = 86400 # a run a full day long + a, b = fragment.split('-') + b = inf if 'end' in b else float(b) + a = float(a) + splitter = sapi.CreateEmptyTableWorkspace(OutputWorkspace='splitter') + splitter.addColumn('double', 'start') + splitter.addColumn('double', 'stop') + splitter.addColumn('str', 'target') #, 'str') + if a == 0.0: + splitter.addRow([b, inf, '0']) + elif b == inf: + splitter.addRow([0, a, '0']) + else: + splitter.addRow([0, a, '0']) + splitter.addRow([b, inf, '0']) + + def _filterEvents(self, run, ws_name): + r"""Filter out ExcludeTimeSegment if applicable + + Parameters + ---------- + run: str + run number + ws_name : str + name of the workspace to filter + """ + for run_fragment in self.getProperty("ExcludeTimeSegment").value.split(';'): + if run+':' in run_fragment: + self.generateSplitterWorkspace(run_fragment.split(':')[1]) + sapi.FilterEvents(InputWorkspace=ws_name, + SplitterWorkspace='splitter', + OutputWorkspaceBaseName='splitted', + GroupWorkspaces=True, + OutputWorkspaceIndexedFrom1=True, + RelativeTime=True) + sapi.UnGroupWorkspace('splitted') + sapi.RenameWorkspace(InputWorkspace='splitted_0', + OutputWorkspace=ws_name) + break # Register algorithm with Mantid. 
AlgorithmFactory.subscribe(BASISReduction) diff --git a/Framework/PythonInterface/plugins/algorithms/EnggFocus.py b/Framework/PythonInterface/plugins/algorithms/EnggFocus.py index 7e658088f19150698b68417007aa4537b38f98ac..28ecf993a48d5403c04f3ac7fa2bba021ee4fa61 100644 --- a/Framework/PythonInterface/plugins/algorithms/EnggFocus.py +++ b/Framework/PythonInterface/plugins/algorithms/EnggFocus.py @@ -62,7 +62,7 @@ class EnggFocus(PythonAlgorithm): self.declareProperty(self.INDICES_PROP_NAME, '', direction=Direction.Input, doc='Sets the spectrum numbers for the detectors ' - 'that should be considered in the focussing operation (all others will be ' + 'that should be considered in the focusing operation (all others will be ' 'ignored). This option cannot be used together with Bank, as they overlap. ' 'You can give multiple ranges, for example: "0-99", or "0-9, 50-59, 100-109".') @@ -160,8 +160,28 @@ class EnggFocus(PythonAlgorithm): # converting units), so I guess that's what users will expect self._convert_to_distribution(input_ws) + if bank: + self._add_bank_number(input_ws, bank) + self.setProperty("OutputWorkspace", input_ws) + def _bank_to_int(self, bank): + if bank == "North": + return "1" + if bank == "South": + return "2" + if bank in ("1", "2"): + return bank + raise RuntimeError("Invalid value for bank: \"{}\" of type {}".format(bank, type(bank))) + + def _add_bank_number(self, ws, bank): + alg = self.createChildAlgorithm("AddSampleLog") + alg.setProperty("Workspace", ws) + alg.setProperty("LogName", "bankid") + alg.setProperty("LogText", self._bank_to_int(bank)) + alg.setProperty("LogType", "Number") + alg.execute() + def _mask_bins(self, wks, min_bins, max_bins): """ Mask multiple ranges of bins, given multiple pairs min-max diff --git a/Framework/PythonInterface/plugins/algorithms/GSASIIRefineFitPeaks.py b/Framework/PythonInterface/plugins/algorithms/GSASIIRefineFitPeaks.py index 
55f1ceb1e60a811d82fe135d09bdda65c3d78896..bb96855ec3e64752552820e6be23c2c51d2e3d50 100644 --- a/Framework/PythonInterface/plugins/algorithms/GSASIIRefineFitPeaks.py +++ b/Framework/PythonInterface/plugins/algorithms/GSASIIRefineFitPeaks.py @@ -1,11 +1,12 @@ from __future__ import (absolute_import, division, print_function) -from mantid.api import AlgorithmFactory, ITableWorkspaceProperty, FileAction, FileProperty, \ - MatrixWorkspaceProperty, Progress, PropertyMode, PythonAlgorithm -from mantid.kernel import Direction, FloatArrayProperty, Property, StringListValidator -import mantid.simpleapi as msapi +from contextlib import contextmanager +import os +import sys +import tempfile -# Too many properties! -#pylint: disable=too-many-instance-attributes +from mantid.kernel import * +from mantid.api import * +import mantid.simpleapi as mantid class GSASIIRefineFitPeaks(PythonAlgorithm): @@ -15,974 +16,232 @@ class GSASIIRefineFitPeaks(PythonAlgorithm): (https://subversion.xray.aps.anl.gov/trac/pyGSAS) """ + PROP_GROUP_PAWLEY_PARAMS = "Pawley Parameters" + PROP_GSAS_PROJ_PATH = "SaveGSASIIProjectFile" + PROP_INPUT_WORKSPACE = "InputWorkspace" + PROP_OUT_GOF = "GOF" + PROP_OUT_GROUP_RESULTS = "Results" + PROP_OUT_LATTICE_PARAMS = "LatticeParameters" + PROP_OUT_RWP = "Rwp" + PROP_PATH_TO_GSASII = "PathToGSASII" + PROP_PATH_TO_INST_PARAMS = "InstrumentFile" + PROP_PATH_TO_PHASE = "PhaseInfoFile" + PROP_PAWLEY_DMIN = "PawleyDMin" + PROP_PAWLEY_NEGATIVE_WEIGHT = "PawleyNegativeWeight" + PROP_REFINEMENT_METHOD = "RefinementMethod" + PROP_SUPPRESS_GSAS_OUTPUT = "MuteGSASII" + PROP_WORKSPACE_INDEX = "WorkspaceIndex" + + DEFAULT_REFINEMENT_PARAMS = {"set": + {"Background": {"no.coeffs": 3, + "refine": True}, + "Sample Parameters": ["Scale"]}} + LATTICE_TABLE_PARAMS = ["length_a", "length_b", "length_c", "angle_alpha", "angle_beta", "angle_gamma", "volume"] + REFINEMENT_METHODS = ["Pawley refinement", "Rietveld refinement", "Peak fitting"] + def category(self): - """ - Override 
required for Mantid algorithms - """ return "Diffraction\\Engineering;Diffraction\\Fitting" def name(self): - """ - Override required for Mantid algorithms - """ return "GSASIIRefineFitPeaks" def summary(self): - """ - Override required for Mantid algorithms - """ - return ("Uses GSAS-II (powder diffraction and structure modules) to perform whole " - "pattern refinement of lattice parameters (or fit peaks) on an diffraction " - "spectrum") - - def __init__(self): - PythonAlgorithm.__init__(self) - - # For the wsPython app underlying GSAS-II - self._gsas2_app = None - - self.PROP_INPUT_WORKSPACE = 'InputWorkspace' - self.PROP_WORKSPACE_INDEX = 'WorkspaceIndex' - self.PROP_INSTR_FILE = 'InstrumentFile' - self.PROP_PHASE_INFO_FILE = 'PhaseInfoFile' - self.PROP_PATH_TO_GSASII = 'PathToGSASII' - self.PROP_PAWLEY_DMIN = "PawleyDmin" - self.PROP_PAWLEY_NEG_WEIGHT = "PawleyNegativeWeight" - self.PROP_BACKGROUND_TYPE = 'BackgroundType' - self.PROP_MINX = 'MinX' - self.PROP_MAXX = 'MaxX' - self.PROP_EXPECTED_PEAKS = "ExpectedPeaks" - self.PROP_EXPECTED_PEAKS_FROM_FILE = "ExpectedPeaksFromFile" - self.PROP_OUT_LATTICE_PARAMS = 'LatticeParameters' - self.PROP_OUT_FITTED_PARAMS = 'FittedPeakParameters' - self.PROP_OUT_PROJECT_FILE = 'SaveGSASIIProjectFile' - self.PROP_OUT_GOF = 'GoF' - self.PROP_OUT_RWP = 'Rwp' - self.PROP_REFINE_CENTER = 'RefineCenter' - self.PROP_REFINE_INTENSITY = 'RefineIntensity' - self.PROP_REFINE_ALPHA = 'RefineAlpha' - self.PROP_REFINE_BETA = 'RefineBeta' - self.PROP_REFINE_SIGMA = 'RefineSigma' - self.PROP_REFINE_GAMMA = 'RefineGamma' - self.PROP_METHOD = "Method" + return ("Perform Rietveld or Pawley refinement of lattice parameters on a diffraction spectrum " + "using GSAS-II scriptable API") def PyInit(self): - - refine_methods = ["Pawley refinement", "Rietveld refinement", "Peak fitting"] - self.declareProperty(self.PROP_METHOD, defaultValue = refine_methods[0], - validator = StringListValidator(refine_methods), - doc = 'Rietveld corresponds to the 
Calculate/Refine option of the ' - 'GSAS-II GUI. Peak fitting is single peak (does not use phase ' - 'information and corresponds to the option ' - 'Peaks List/Peak Fitting/PeakFitType of the GSAS-II GUI. The ' - 'third alternative requires a list of peaks which can be bassed in ' - 'the properties ' + self.PROP_EXPECTED_PEAKS + ' and ' + - self.PROP_EXPECTED_PEAKS_FROM_FILE + '.') - - self.declareProperty(MatrixWorkspaceProperty(self.PROP_INPUT_WORKSPACE, '', - optional = PropertyMode.Mandatory, - direction = Direction.Input), - doc = 'Workspace with spectra to fit peaks. ToF is expected X unit.') - - self.declareProperty(self.PROP_WORKSPACE_INDEX, 0, - doc = 'Index of the workspace for the spectrum to fit. By default ' - 'the first spectrum will be processed (that is, the only spectrum ' - 'for focussed data workspaces.', direction = Direction.Input) - - self.declareProperty(FileProperty(name = self.PROP_INSTR_FILE, defaultValue = '', - action = FileAction.Load, - extensions = [".par", ".prm", ".ipar", ".iparm"]), - doc = 'File with instrument parameters (in GSAS format).') - - self.declareProperty(FileProperty(name = self.PROP_PHASE_INFO_FILE, defaultValue = '', - action = FileAction.OptionalLoad, extensions = [".cif"]), - doc = 'File with phase information for the material.') - - self.declareProperty(FileProperty(name = self.PROP_PATH_TO_GSASII, defaultValue = '', - action = FileAction.OptionalDirectory), - doc = 'Optional path to GSAS-II software installation. 
' - 'This will be used to import several Python modules from GSAS-II.') - - GRP_RESULTS = "Results" - - self.declareProperty('GoF', 0.0, direction = Direction.Output, - doc = 'Goodness of fit value (Chi squared).') - - self.declareProperty('Rwp', 0.0, direction = Direction.Output, - doc = 'Weighted profile R-factor (Rwp) discrepancy index for the ' - 'goodness of fit.') - - self.declareProperty(ITableWorkspaceProperty(self.PROP_OUT_LATTICE_PARAMS, "", Direction.Output), - doc = 'Table to output the the lattice parameters (refined).') - - self.declareProperty(self.PROP_OUT_FITTED_PARAMS, "", direction=Direction.Input, - doc = "Name for an (output) table of fitted parameters. This is used " - "with the peak fitting method. The table will have one row per peak " - "found.") - - # This is mandatory. We could also make it FileAction.OptionalSave, and use a temporary - # project file when the option is not given by the user. - self.declareProperty(FileProperty(name = self.PROP_OUT_PROJECT_FILE, defaultValue = '', - direction = Direction.Input, - action = FileAction.Save, extensions = [".gpx"]), - doc = 'GSAS-II project file (that can be openened in the GSAS-II GUI).') - - self.setPropertyGroup(self.PROP_OUT_GOF, GRP_RESULTS) - self.setPropertyGroup(self.PROP_OUT_RWP, GRP_RESULTS) - self.setPropertyGroup(self.PROP_OUT_LATTICE_PARAMS, GRP_RESULTS) - self.setPropertyGroup(self.PROP_OUT_FITTED_PARAMS, GRP_RESULTS) - self.setPropertyGroup(self.PROP_OUT_PROJECT_FILE, GRP_RESULTS) - - GRP_FITTING_OPTS = "Fitting options" - background_types = ["Chebyshev", "None"] - self.declareProperty(self.PROP_BACKGROUND_TYPE, defaultValue = background_types[0], - validator = StringListValidator(background_types), - doc = 'Type of background for the peak fitting. 
Currently only the ' - 'default option of GSAS-II (chebyshev) is supported.') - - self.declareProperty(self.PROP_MINX, Property.EMPTY_DBL, - direction = Direction.Input, - doc = "Minimum x value for the fitting, in the same units as the input " - "workspace (TOF). Defines the range or domain of fitting together " - "with the property {0}. Leave empty to use the whole range". - format(self.PROP_MAXX)) - - self.declareProperty(self.PROP_MAXX, Property.EMPTY_DBL, - direction = Direction.Input, - doc = "Maximum x value for the fitting, in the same units as the input " - "workspace (TOF). Defines the range or domain of fitting together " - "with the property {0}. Leave empty to use the whole range". - format(self.PROP_MINX)) - - self.setPropertyGroup(self.PROP_BACKGROUND_TYPE, GRP_FITTING_OPTS) - self.setPropertyGroup(self.PROP_MINX, GRP_FITTING_OPTS) - self.setPropertyGroup(self.PROP_MAXX, GRP_FITTING_OPTS) - - GRP_PAWLEY_OPTIONS = "Pawley refinement options" - - self.declareProperty(self.PROP_PAWLEY_DMIN, 1.0, direction = Direction.Input, - doc = "For Pawley refiment: as defined in GSAS-II, the minimum d-spacing " - "to be used in a Pawley refinement. Please refer to the GSAS-II " - "documentation for full details.") - - self.declareProperty(self.PROP_PAWLEY_NEG_WEIGHT, 0.0, direction = Direction.Input, - doc = "For Pawley refinement: as defined in GSAS-II, the weight for a " - "penalty function applied during a Pawley refinement on resulting negative " - "intensities. Please refer to the GSAS-II documentation for full details.") - - self.setPropertyGroup(self.PROP_PAWLEY_DMIN, GRP_PAWLEY_OPTIONS) - self.setPropertyGroup(self.PROP_PAWLEY_NEG_WEIGHT, GRP_PAWLEY_OPTIONS) - - GRP_PEAKS = "Expected peaks (phase information takes precedence)" - - self.declareProperty(FloatArrayProperty(self.PROP_EXPECTED_PEAKS, [], - direction = Direction.Input), - "A list of dSpacing values for the peak centers. 
These will be " - "converted into TOF to find expected peaks.") - - self.declareProperty(FileProperty(name = self.PROP_EXPECTED_PEAKS_FROM_FILE, defaultValue = "", - action = FileAction.OptionalLoad, extensions = [".csv"], - direction = Direction.Input), - doc = "Load from this file a list of dSpacing values to be converted " - "into TOF . This takes precedence over '" + self.PROP_EXPECTED_PEAKS + "' " - "when both options are given.") - - self.setPropertyGroup(self.PROP_EXPECTED_PEAKS, GRP_PEAKS) - self.setPropertyGroup(self.PROP_EXPECTED_PEAKS_FROM_FILE, GRP_PEAKS) - - GRP_PARAMS = "Refinement of peak parameters" - - self.declareProperty(name = self.PROP_REFINE_CENTER, defaultValue = False, - doc = 'Whether to refine the peak centers.') - - self.declareProperty(name = self.PROP_REFINE_INTENSITY, defaultValue = False, - doc = 'Whether to refine the peak function intensity parameters ' - '(assuming a shape of type back-to-back exponential convoluted ' - 'with pseudo-voigt (BackToBackExponentialPV).') - - self.declareProperty(name = self.PROP_REFINE_ALPHA, defaultValue = False, - doc = 'Whether to refine the peak function beta parameters ' - '(assuming a BackToBackExponentialPV peak shape.') - - self.declareProperty(name = self.PROP_REFINE_BETA, defaultValue = False, - doc = 'Whether to refine the peak function beta parameters ' - '(assuming a BackToBackExponentialPV peak shape.') - - self.declareProperty(name = self.PROP_REFINE_SIGMA, defaultValue = True, - doc = 'Whether to refine the peak function sigma parameters ' - '(assuming a BackToBackExponentialPV peak shape.') - - self.declareProperty(name = self.PROP_REFINE_GAMMA, defaultValue = True, - doc = 'Whether to refine the peak function gamma parameters ' - '(assuming a BackToBackExponentialPV peak shape.') - - self.setPropertyGroup(self.PROP_REFINE_CENTER, GRP_PARAMS) - self.setPropertyGroup(self.PROP_REFINE_INTENSITY, GRP_PARAMS) - self.setPropertyGroup(self.PROP_REFINE_ALPHA, GRP_PARAMS) - 
self.setPropertyGroup(self.PROP_REFINE_BETA, GRP_PARAMS) - self.setPropertyGroup(self.PROP_REFINE_SIGMA, GRP_PARAMS) - self.setPropertyGroup(self.PROP_REFINE_GAMMA, GRP_PARAMS) - - def validateInputs(self): - errors = {} - pfm_name = "Peak fitting" - # This could check if the required inputs for different methods have been provided - if pfm_name == self.getPropertyValue(self.PROP_METHOD) and\ - not self.getPropertyValue(self.PROP_OUT_FITTED_PARAMS): - errors[self.PROP_OUT_FITTED_PARAMS] = ("Must be provided when the method is {0}.". - format(pfm_name)) - - if pfm_name != self.getPropertyValue(self.PROP_METHOD) and\ - not self.getPropertyValue(self.PROP_PHASE_INFO_FILE): - errors[self.PROP_PHASE_INFO_FILE] = ("Must be provided when using cell lattice " - "parameters revinement methods") - - min_x = self.getPropertyValue(self.PROP_MINX) - max_x = self.getPropertyValue(self.PROP_MAXX) - if min_x != Property.EMPTY_DBL and max_x != Property.EMPTY_DBL and min_x > max_x: - errors[self.PROP_MINX] = ("The minimum given in {0} must be <= than the maximum " - "given in {1}".format(self.PROP_MINX, self.PROP_MAXX)) - - return errors + self.declareProperty(name=self.PROP_REFINEMENT_METHOD, defaultValue=self.REFINEMENT_METHODS[0], + validator=StringListValidator(self.REFINEMENT_METHODS), direction=Direction.Input, + doc="Refinement method (Rietvield or Pawley)") + + self.declareProperty(WorkspaceProperty(name=self.PROP_INPUT_WORKSPACE, defaultValue="", + direction=Direction.Input), doc="Workspace with spectra to fit peaks") + self.declareProperty(name=self.PROP_WORKSPACE_INDEX, defaultValue=0, direction=Direction.Input, + doc="Index of the spectrum in InputWorkspace to fit. 
By default, the first spectrum " + "(ie the only one for a focused workspace) is used") + self.declareProperty(FileProperty(name=self.PROP_PATH_TO_INST_PARAMS, defaultValue="", action=FileAction.Load, + extensions=[".prm"]), doc="Location of the phase file") + self.declareProperty(FileProperty(name=self.PROP_PATH_TO_PHASE, defaultValue="", action=FileAction.Load, + extensions=[".cif"]), doc="Location of the phase file") + self.declareProperty(FileProperty(name=self.PROP_PATH_TO_GSASII, defaultValue="", action=FileAction.Directory), + doc="Path to the directory containing GSASII executable on the user's machine") + + self.declareProperty(name=self.PROP_OUT_GOF, defaultValue=0.0, direction=Direction.Output, + doc="Goodness of fit value (Chi squared)") + self.declareProperty(name=self.PROP_OUT_RWP, defaultValue=0.0, direction=Direction.Output, + doc="Weight profile R-factor (Rwp) discrepancy index for the goodness of fit") + self.declareProperty(ITableWorkspaceProperty(name=self.PROP_OUT_LATTICE_PARAMS, direction=Direction.Output, + defaultValue=self.PROP_OUT_LATTICE_PARAMS), + doc="Table to output the lattice parameters (refined)") + self.declareProperty(FileProperty(name=self.PROP_GSAS_PROJ_PATH, defaultValue="", action=FileAction.Save, + extensions=".gpx"), doc="GSASII Project to work on") + + self.setPropertyGroup(self.PROP_OUT_GOF, self.PROP_OUT_GROUP_RESULTS) + self.setPropertyGroup(self.PROP_OUT_RWP, self.PROP_OUT_GROUP_RESULTS) + self.setPropertyGroup(self.PROP_OUT_LATTICE_PARAMS, self.PROP_OUT_GROUP_RESULTS) + self.setPropertyGroup(self.PROP_GSAS_PROJ_PATH, self.PROP_OUT_GROUP_RESULTS) + + self.declareProperty(name=self.PROP_PAWLEY_DMIN, defaultValue=1.0, direction=Direction.Input, + doc="For Pawley refiment: as defined in GSAS-II, the minimum d-spacing to be used in a " + "Pawley refinement. 
Please refer to the GSAS-II documentation for full details.") + self.declareProperty(name=self.PROP_PAWLEY_NEGATIVE_WEIGHT, defaultValue=0.0, direction=Direction.Input, + doc="For Pawley refinement: as defined in GSAS-II, the weight for a penalty function " + "applied during a Pawley refinement on resulting negative intensities. " + "Please refer to the GSAS-II documentation for full details.") + + self.setPropertyGroup(self.PROP_PAWLEY_DMIN, self.PROP_GROUP_PAWLEY_PARAMS) + self.setPropertyGroup(self.PROP_PAWLEY_NEGATIVE_WEIGHT, self.PROP_GROUP_PAWLEY_PARAMS) + + self.declareProperty(name=self.PROP_SUPPRESS_GSAS_OUTPUT, defaultValue=False, direction=Direction.Input, + doc="Set to True to prevent GSAS run info from being " + "printed (not recommended, but can be useful for debugging)") def PyExec(self): - prog = Progress(self, start=0, end=1, nreports=5) - - prog.report('Importing GSAS-II ') - self._run_threadsafe(self._import_gsas2, self.getProperty(self.PROP_PATH_TO_GSASII).value) - - prog.report('Initializing GSAS-II ') - gs2 = self._run_threadsafe(self._init_gs2) - - prog.report('Loading and preparing input data') - focused_wks = self._get_focused_wks(self.PROP_INPUT_WORKSPACE, self.PROP_WORKSPACE_INDEX) - - inst_file = self.getProperty(self.PROP_INSTR_FILE).value - try: - (gs2_rd, limits, peaks_init, background_def) =\ - self._run_threadsafe(self._load_prepare_data_for_fit, gs2, focused_wks, inst_file) - except RuntimeError as rexc: - raise RuntimeError("Error in execution of GSAS-II data loading routines: " - "{0}.".format(str(rexc))) - - # No obvious way to provide proper progress report from inside the refinement/fitting routines - prog.report('Running refinement. 
This may take some times') - try: - (gof_estimates, lattice_params, parm_dict) = \ - self._run_threadsafe(self._run_refinement, - gs2, gs2_rd, (limits, peaks_init, background_def)) - except RuntimeError as rexc: - raise RuntimeError("Error in execution of GSAS-II refinement routines: " - "{0}".format(str(rexc))) - - prog.report('Producing outputs') - self._save_project_read_lattice(gs2, gs2_rd) - self._produce_outputs(gof_estimates, lattice_params, parm_dict) - - import time - time.sleep(0.1) - - def _run_threadsafe(self, func_call, *args, **kwargs): - """ - GSAS-II is a wx application. When running inside MantidPlot it needs GUI/threadsafe - treatment - """ - # if 'mantidplot' in locals() or 'mantidplot' in globals(): - try: - import pymantidplot - return pymantidplot.threadsafe_call(func_call, *args, **kwargs) - except ImportError: - return func_call(*args, **kwargs) - - def _get_focused_wks(self, wks_prop_name, index_prop_name): - in_wks = self.getProperty(wks_prop_name).value - in_idx = self.getProperty(index_prop_name).value - - if in_wks.getNumberHistograms() > 1 or 0 != in_idx: - focused_wks = msapi.ExtractSpectra(InputWorkspace = in_wks, StartWorkspaceIndex = in_idx, - EndworkspaceIndex = in_idx) - else: - focused_wks = in_wks - - return focused_wks - - def _load_prepare_data_for_fit(self, gs2, gs2_focused_wks, inst_file): - """ - Loads the data into an "rd" object as used in the GSAS-II python modules, and - prepares an initia peaks list and a background function definition. - Importantly, this reads the rd.powderdata list with the histogram data, - and the rd.pwdparms tuple with instrument parameter lists - - @param gs2 :: the main GSAS-II object - @param gs2_focused_wks :: a focused (single spectrum) workspace - @param inst_file :: GSAS instrument parameters file (.par / .prm / .iparm, etc.) - - @returns a tuple with: 1) 3) GSAS-II "rd" object. 
2) limits for fitting, - 3) list of peaks, 4) background definition, These are ready to be passed to the - peak fitting functions - """ - gs2_rd = self._build_gsas2_reader_with_data(gs2, gs2_focused_wks) - self.log().information("Loaded histogram data in GSAS-II data object: {0}". - format(gs2_rd.powderdata)) + refinement_method = self.getPropertyValue(self.PROP_REFINEMENT_METHOD) + if refinement_method == self.REFINEMENT_METHODS[2]: # Peak fitting + raise NotImplementedError("GSAS-II Peak fitting not yet implemented in Mantid") - gs2_rd, inst_parms = self._add_instrument_info(gs2, gs2_rd, inst_file) - self.log().information("Parameters from instrument file: {0}".format(gs2_rd.pwdparms)) + with self._suppress_stdout(): + gsas_proj = self._initialise_GSAS() - background_def = self._build_add_background_def(gs2_rd) - self.log().information("Using background function: {0}".format(background_def)) + rwp, gof, lattice_params = \ + self._run_rietveld_pawley_refinement(gsas_proj=gsas_proj, + do_pawley=refinement_method == self.REFINEMENT_METHODS[0]) + self._set_output_properties(rwp=rwp, gof=gof, lattice_params=lattice_params) - limits = self._build_add_limits(gs2_rd) - self.log().information("Fitting loaded histogram data, with limits: {0}".format(limits)) - - # self.PROP_PHASE_INFO_FILE - phase information into rd, this is loaded elsewhere when - # doing Pawley/Rietveld refinement + def _build_output_lattice_table(self, lattice_params): + alg = self.createChildAlgorithm('CreateEmptyTableWorkspace') + alg.execute() + table = alg.getProperty('OutputWorkspace').value - # Assumes peaks of type back-to-back exponential convoluted with pseudo-Voigt - # That is true in ENGIN-X instrument parameters file - peaks_init = self._init_peaks_list(gs2_rd, limits, inst_parms) - self.log().information("Peaks parameters initialized as: {0}".format(peaks_init)) + for param in self.LATTICE_TABLE_PARAMS: + table.addColumn("double", param.split("_")[-1]) - return (gs2_rd, limits, peaks_init, 
background_def) + table.addRow([float(lattice_params[param]) for param in self.LATTICE_TABLE_PARAMS]) + return table - def _run_refinement(self, gs2, gs2_rd, fit_inputs): + def _extract_spectrum_from_workspace(self): """ - Run the different refinement/fitting methods - - @param gs2 :: the main GSAS-II object - @param gs2_rd :: the GSAS-II "rd" object with powder data in it - @param fit_inputs :: a tuple with inputs for the fitting process. 3 elements: - limits, peaks_list, background_def. 1) limits is a tuple with the min X and max X values - for the fitting. 2) peaks_list is a list of peaks to fit. 3) background_def is the - background function as defined in GSAS-II (a list of parameters) - - @return a tuple with 1) the two goodness-of-fit estimates, 2) lattice params (list with - 7 values), 3) parameters fitted (when doing peak fitting) + Extract a single spectrum from the input workspace. If the input workspace only has one spectrum then just + return the input workspace + :return: Single-spectrum workspace """ - method = self.getProperty(self.PROP_METHOD).value - gof_estimates = [0, 0] - lattice_params = 7*[0.0] - parm_dict = {} - if "Pawley refinement" == method: - (gof_estimates, lattice_params) = self._run_rietveld_pawley_refinement(gs2, gs2_rd, True) - elif "Rietveld refinement" == method: - (gof_estimates, lattice_params) = self._run_rietveld_pawley_refinement(gs2, gs2_rd, False) - elif "Peak fitting" == method: - (gof_estimates, parm_dict) = self._run_peak_fit(gs2_rd, fit_inputs) + ws = self.getPropertyValue(self.PROP_INPUT_WORKSPACE) + if mtd[ws].getNumberHistograms > 1: + ws_index = self.getPropertyValue(self.PROP_WORKSPACE_INDEX) + spectrum = mantid.ExtractSpectra(InputWorkspace=ws, StartWorkspaceIndex=ws_index, EndWorkspaceIndex=ws_index) + mantid.DeleteWorkspace(Workspace=ws) + return spectrum else: - raise RuntimeError("Inconsistency found. 
Unknown refinement method: {0}".format(method)) - - return (gof_estimates, lattice_params, parm_dict) - - def _run_rietveld_pawley_refinement(self, gs2, gs2_rd, do_pawley): - """ - Run Rietveld or Pawley refinement - - @param do_pawley :: Select Pawley (True) or Rietveld (False) - @param gs2 :: the main GSAS-II object - @param gs2_rd :: the GSAS-II "rd" object with powder data in it. Phase information will be - connected to it - - @return a tuple with 1) the two goodness-of-fit estimates, 2) lattice params (list with - 7 values) - """ - phase_data = self._load_prepare_phase_data(gs2, gs2_rd, - self.getProperty(self.PROP_PHASE_INFO_FILE).value) - - # Enable / tick on "Refine unit cell" - general_phase_data = phase_data['General'] - general_phase_data['Cell'][0] = True - if do_pawley: - # Note from GSAS-II doc: "you probably should clear the Histogram scale factor refinement - # flag (found in Sample parameters for the powder data set) as it cannot be refined - # simultaneously with the Pawley reflection intensities" - gs2_rd.Sample['Scale'] = [1.0, False] - - # Flag for Pawley intensity extraction (bool) - general_phase_data['doPawley'] = True - # maximum Q (as d-space) to use for Pawley extraction - general_phase_data['Pawley dmin'] = self.getProperty(self.PROP_PAWLEY_DMIN).value - # Restraint value for negative Pawley intensities - general_phase_data['Pawley neg wt'] = self.getProperty(self.PROP_PAWLEY_NEG_WEIGHT).value - - proj_filename = self.getProperty(self.PROP_OUT_PROJECT_FILE).value - self.log().notice("Saving GSAS-II project file before starting refinement, into: {0}". - format(proj_filename)) - - # Save project with phase data imported and connected now - self._save_gsas2_project(gs2, gs2_rd, proj_filename) - - residuals = self._do_refinement(gs2) - gof_estimates = (residuals['Rwp'], residuals['GOF'], residuals['chisq']) - # the first value is the flag refine on/off. 
The following 7 are the proper lattice params - lattice_params = general_phase_data['Cell'][1:] + return ws - import os - self.log().notice("GSAS-II refinement details produced in file: {0}". - format(os.path.splitext(gs2.GSASprojectfile)[0]) + '.lst') - - return (gof_estimates, lattice_params) - - def _do_refinement(self, gs2): + def _initialise_GSAS(self): """ - Calls the refinement routines of the structure module of GSASII - - @param gs2 :: the main GSAS-II object - - @return residuals, a dictionary that among other parameters contains 'Rwp', - 'GOF', 'chisq', and 'converged' + Initialise a GSAS project object with a spectrum and an instrument parameter file + :return: GSAS project object """ - # assume gs2.GSASprojectfile == proj_filename which should be true because - # The project file always needs to be saved before running refine - import GSASIIstrIO - import GSASIIstrMain - err_msg, warn_msg = GSASIIstrIO.ReadCheckConstraints(gs2.GSASprojectfile) - if err_msg: - raise RuntimeError("Error in refinement: {0}".format(err_msg)) - if warn_msg: - raise RuntimeError("Conflict between refinement flag settings " - "and constraints: {0}".format(err_msg)) - - # note: if result_ok==False, residuals is actually an error message text! - result_ok, residuals = GSASIIstrMain.Refine(gs2.GSASprojectfile, dlg=None, useDlg=False) - if not result_ok: - raise RuntimeError("There was a problem while running the core refinement routine. " - "Error description: {0}".format(residuals)) - else: - self.log().notice("Refinement routine finished successfully with Rwp (weighted) " - "profile R-factor: {0}".format(residuals['Rwp'])) - - return residuals - - def _run_peak_fit(self, gs2_rd, fit_inputs): - """ - This performs peak fitting as in GSAS-II "Peaks List/Peak Fitting/PeakFitType". - Does not require/use phase information. Requires histogram data, instrument parameters - and background. - - @param gs2_rd :: the GSAS-II "rd" object with powder data in it. 
It must have a - 'powderdata' member with the histogram data as used in GSAS-II. a list of vectors - (X, Y vectors) - @param fit_inputs :: tuple with three inputs: - 1) tuple with the min X and max X values for the fitting - 2) peaks_list :: list of peaks to fit - 3) background_def :: background function as defined in GSAS-II, a list of parameters - - @return a tuple with: 1) a tuple with the Rwp and GoF values (weighted profile - R-factor, goodness of fit), 2) the parameters dictionary - """ - import GSASIIpwd - - (limits, peaks_list, background_def) = fit_inputs - (inst_parm1, inst_parm2) = gs2_rd.pwdparms['Instrument Parameters'] - # peaks: ['pos','int','alp','bet','sig','gam'] / with the refine flag each - sig_dict, result, sig, Rvals, vary_list, parm_dict, full_vary_list, bad_vary = \ - GSASIIpwd.DoPeakFit(FitPgm = 'LSQ', Peaks = peaks_list, - Background = background_def, - Limits = limits, - Inst = inst_parm1, Inst2 = inst_parm2, - data = gs2_rd.powderdata, - prevVaryList = None - # OneCycle = False, controls = None, dlg = None - ) - self.log().debug("Result: : {0}".format(result)) - Rwp = Rvals['Rwp'] - gof = Rvals['GOF'] - self.log().information("Rwp: : {0}".format(Rwp)) - self.log().information("GoF: : {0}".format(gof)) - self.log().information("'Sig': {0}".format(sig)) - self.log().information("'Sig', values: : {0}".format(sig_dict)) - self.log().information("List of parameters fitted: : {0}".format(vary_list)) - self.log().information("Parameters fitted, values: {0}".format(parm_dict)) - self.log().information("Full list of parameters: {0}".format(full_vary_list)) - self.log().information("Parameters for which issues were found when refining: {0}". - format(bad_vary)) - - # chisq value (the 3rd) is not returned by DoPeakFit - TODO, how? 
- gof_estimates = (Rwp, gof, 0) - return (gof_estimates, parm_dict) - - def _import_gsas2(self, additional_path_prop): + gsas_path = self.getPropertyValue(self.PROP_PATH_TO_GSASII) + sys.path.append(gsas_path) try: + import GSASIIscriptable as GSASII + except ImportError: + error_msg = "Could not import GSAS-II. Are you sure it's installed at {}?".format(gsas_path) + logger.error(error_msg) + raise ImportError(error_msg) - import sys - import os - if additional_path_prop: - sys.path.append(additional_path_prop) - os.chdir(additional_path_prop) - self._import_global("GSASII") - # It will be needed for save project - self._import_global("GSASIIIO") - # For powder diffraction data fitting routines - self._import_global("GSASIIpwd") - self._import_global("GSASIIgrid") - # For phase data loading (yes, GUI) - self._import_global("GSASIIphsGUI") - self._import_global("GSASIIspc") - # for Rietveld/Pawley refinement - self._import_global("GSASIIstrIO") - self._import_global("GSASIIstrMain") - if additional_path_prop: - sys.path.pop() - except ImportError as ierr: - raise ImportError("Failed to import the GSASII and its required sub-modules " - "from GSAS-II. Please make sure that it is available in the " - "Mantid Python path and/or the path to GSAS-II given in the " - "input property " + additional_path_prop + ". More error " - "details: " + str(ierr)) - - def _import_global(self, mod_name): - globals()[mod_name] = __import__(mod_name) - - def _init_gs2(self): - # Do not feel tempted to create the GSASII wx app in the usual way: - # _gsas2_app = GSASII.GSASIImain(0) - # This will use Show() and SetTopWindow() and that will cause a crash when the - # algorithm finishes and is destroyed! 
- # This seems to destroy/close safely - import wx - import GSASII - - self._gsas2_app = wx.App() - - gs2 = GSASII.GSASII(None) - return gs2 - - def _build_gsas2_reader_with_data(self, gs2, gs2_focused_wks): - """ - Build an "rd" GSAS-II data structure with reader, and importantly the rd.powderdata - list with the histogram data. - - @param gs2_focused_wks :: a workspace with a histogram from focused data - - @return an "rd" object as used in GSAS-II, with histogram data in the 'powderdata' - data member - """ - - # produce histo_file, to "import" it with the reader from GSAS-II which will initialize - # the "rd" object. - import tempfile - with tempfile.NamedTemporaryFile(mode='w', suffix='.xye', delete=False) as histo_data_tmp: - # SplitFiles=False is important to get the filename without suffix - msapi.SaveFocusedXYE(InputWorkspace=gs2_focused_wks, Filename=histo_data_tmp.name, - Format="XYE", SplitFiles=False) - gs2_rd = self._get_histo_data_reader(gs2, histo_data_tmp.name) - - gs2_rd.powderdata = self._transform_to_centers_bins(gs2_rd.powderdata) - - if not isinstance(gs2_rd.powderdata, list): - raise ValueError('rd.powderdata is not a list of array as expected') - - return gs2_rd - - def _get_histo_data_reader(self, gs2, histo_data_file): - readers_list = self._init_histo_data_readers(gs2) - if not isinstance(readers_list, list) or len(readers_list) < 6: - raise RuntimeError("Could not find the reader of type G2pwd_xye.xye_ReaderClass. " - "Got a list of only {0} readers.".format(len(readers_list))) - - # 6 is a: 'G2pwd_xye.xye_ReaderClass object'. Warning: this can change sometimes - reader_xye = [readers_list[6]] - if not isinstance(reader_xye[0], object): - raise RuntimeError("Could not find the reader of type G2pwd_xye.xye_ReaderClass. 
" - "Got this object: {0}".format(reader_xye)) - - success, gs2_rd_list, err_msg = gs2.ImportDataGeneric(histo_data_file, reader_xye, []) - if not success or 0 == len(gs2_rd_list): - raise RuntimeError('Empty list of readers. Cannot continue. The error message from ' - 'GSAS-II is: ' + err_msg) + gsas_proj_path = self.getPropertyValue(self.PROP_GSAS_PROJ_PATH) + gsas_proj = GSASII.G2Project(filename=gsas_proj_path) - # work only with the first one - gs2_rd = gs2_rd_list[0] + spectrum = self._extract_spectrum_from_workspace() + spectrum_path = self._save_temporary_fxye(spectrum=spectrum) + mantid.DeleteWorkspace(Workspace=spectrum) - return gs2_rd + inst_param_path = self.getPropertyValue(self.PROP_PATH_TO_INST_PARAMS) + gsas_proj.add_powder_histogram(datafile=spectrum_path, iparams=inst_param_path) - def _transform_to_centers_bins(self, powderdata): - """ - Transform data that comes as a list: X vector (bin boundaries), multiple Y vectors - (values) into a list: X vector (bin centers), multiply Y vectors. This replicates - the behavior of GSAS-II and the way it loads XYE files. + self._remove_temporary_fxye(spectrum_path=spectrum_path) - @param powderdata :: two dimensional array. In the outermost dimension, the first - element is a vector of X values. The next elements are vectors of Y values. - """ - powderdata[0] = self._calc_centers_bins(powderdata[0]) - for pdi in range(1, len(powderdata)): - powderdata[pdi] = powderdata[pdi][:-1] + return gsas_proj - return powderdata - - def _calc_centers_bins(self, data): - """ - Assuming that data is a vector of bin limits, changes it to the centers - of the bins. 
+ def _remove_temporary_fxye(self, spectrum_path): + try: + os.remove(spectrum_path) + except Exception as e: + raise Warning("Couldn't remove temporary spectrum file at location \"{}\":\n{}".format(spectrum_path, e)) - @param data :: one-dimensional array + def _run_rietveld_pawley_refinement(self, gsas_proj, do_pawley): """ - return (data[0:-1]+data[1:])/2.0 - - def _add_instrument_info(self, gs2, gs2_rd, inst_file): - if 'Instrument Parameters' not in gs2_rd.pwdparms: - gs2.zipfile = None # init required before GetPowderIparm - # Trick: if you pass lastIparmfile (being sure that it exits) it will be - # picked up without having to ask via a pop-up dialog - # An alternative is to set 'gs2_rd.instparm = inst_file' but that assumes both - # the data and instrument files are in the same directory - inst_parm1, inst_parm2 = gs2.GetPowderIparm(rd=gs2_rd, prevIparm=None, - lastIparmfile=inst_file, lastdatafile='') - if not inst_parm1: # or not inst_parm2: # (note inst_parm2 is commonly an empty dict) - raise RuntimeError('Failed to import the instrument parameter structure') - - gs2_rd.pwdparms['Instrument Parameters'] = (inst_parm1, inst_parm2) - - return (gs2_rd, (inst_parm1, inst_parm2)) - - def _build_add_background_def(self, gs2_rd): - # Note: blatantly ignores self.getProperty(self.PROP_BACKGROUND_TYPE) - backg_def = [['chebyschev', True, 3, 1.0, 0.0, 0.0], - {'peaksList': [], 'debyeTerms': [], 'nPeaks': 0, 'nDebye': 0}] - - gs2_rd.pwdparms['Background'] = backg_def - - return backg_def - - def _build_add_limits(self, gs2_rd): - - min_x = self.getProperty(self.PROP_MINX).value - if Property.EMPTY_DBL == min_x: - min_x = gs2_rd.powderdata[0].min() - max_x = self.getProperty(self.PROP_MAXX).value - if Property.EMPTY_DBL == max_x: - max_x = gs2_rd.powderdata[0].max() - - limits = [min_x, max_x] - gs2_rd.pwdparms['Limits'] = limits - - return limits - - def _init_peaks_list(self, gs2_rd, limits, inst_parms): - # Bring the auto-search code out of that file! 
- TODO in GSAS - import GSASIIpwdGUI - - (inst_parm1, inst_parm2) = inst_parms - peaks_init = GSASIIpwdGUI.DoPeaksAutoSearch(gs2_rd.powderdata, limits, inst_parm1, inst_parm2) - # Note this sets as default: refine intensity, and no other parameters - for peak in peaks_init: - peak[1] = self.getProperty(self.PROP_REFINE_CENTER).value - peak[3] = self.getProperty(self.PROP_REFINE_INTENSITY).value - peak[5] = self.getProperty(self.PROP_REFINE_ALPHA).value - peak[7] = self.getProperty(self.PROP_REFINE_BETA).value - # sigma (Gaussian) - peak[9] = self.getProperty(self.PROP_REFINE_SIGMA).value - # gamma (Lorentzian) - peak[11] = self.getProperty(self.PROP_REFINE_GAMMA).value - - # Just to have the same sequence as GSAS-II in its tables/standard output - peaks_init.sort() - peaks_init.reverse() - - return peaks_init - - def _load_prepare_phase_data(self, gs2, gs2_rd, phase_filename): + Run a Rietveld or Pawley refinement + :param gsas_proj: The project to work on + :param do_pawley: True if doing a Pawley refinement (the default), False if doing a Rietveld refinement + :return: (R weighted profile, goodness-of-fit coefficient, table containing refined lattice parameters) """ - Loads and sets up phase data from a phase information (CIF) file + phase_path = self.getPropertyValue(self.PROP_PATH_TO_PHASE) + phase = gsas_proj.add_phase(phasefile=phase_path, histograms=[gsas_proj.histograms()[0]]) - @param gs2 :: the main GSAS-II object - @param gs2_rd :: the GSAS-II "rd" object with powder data in it - @param phase_filename :: name of the CIF file - - @return phase data object as defined in GSAS-II GSASIIobj.py, with the imported phase - information and other fields set up to defaults. 
- """ - import GSASIIphsGUI - - # Import phase data from (CIF) file - phase_readers_list = gs2.ImportPhaseReaderlist - # 3 is G2phase_CIF.CIFPhaseReader - phase_readers_list = [phase_readers_list[3]] - - _success, _rd_list, err_msg = gs2.ImportDataGeneric(phase_filename, phase_readers_list, [], - usedRanIdList=['noGUI'], Start=False) - if err_msg: - raise RuntimeError("There was a problem while importing the phase information file ({0}. " - "Error details: {1}".format(phase_filename, err_msg)) - - phase_reader = phase_readers_list[0] - GSASIIphsGUI.SetupGeneralWithoutGUI(gs2, phase_reader.Phase) - phase_data = self._register_phase_data_to_histo(gs2, gs2_rd, phase_reader, phase_filename) - - return phase_data - - def _register_phase_data_to_histo(self, gs2, gs2_rd, phase_reader, phase_filename): - # Register phase data and add it to the histo data - import os - import GSASIIgrid - phase_name = os.path.basename(phase_filename) - phase_reader.Phase['General']['Name'] = phase_name - self.log().debug(" Phase information name: {0}".format(phase_name)) - - if not GSASIIgrid.GetPatternTreeItemId(gs2, gs2.root, 'Phases'): - sub = gs2.PatternTree.AppendItem(parent=gs2.root, text='Phases') + if do_pawley: + self._set_pawley_phase_parameters(phase) + + gsas_proj.set_refinement(refinement=self.DEFAULT_REFINEMENT_PARAMS) + gsas_proj.do_refinements([{}]) + + residuals = gsas_proj.values()[2]["data"]["Rvals"] + lattice_params = gsas_proj.phases()[0].get_cell() + lattice_param_table = self._build_output_lattice_table(lattice_params) + + return residuals["Rwp"], residuals["GOF"], lattice_param_table + + def _save_temporary_fxye(self, spectrum): + """ + Create a temporary fxye file for GSAS to read the spectrum from. 
This is required as we cannot pass a workspace + straight to GSASIIscriptable, but rather it must be read from a file + :param spectrum: The spectrum to save + :return: Fully qualified path to the new file + """ + workspace_index = self.getPropertyValue(self.PROP_WORKSPACE_INDEX) + temp_dir = tempfile.gettempdir() + # Output file MUST end with "-n.fxye" where n is a number + # If you see "Runtime error: Rvals" from GSASIIscriptable.py, it may be because this name is badly formatted + file_path = os.path.join(temp_dir, "{}_focused_spectrum-{}.fxye".format(self.name(), workspace_index)) + mantid.SaveFocusedXYE(Filename=file_path, InputWorkspace=spectrum, SplitFiles=False) + return file_path + + def _set_output_properties(self, rwp, gof, lattice_params): + self.setProperty(self.PROP_OUT_RWP, rwp) + self.setProperty(self.PROP_OUT_GOF, gof) + self.setProperty(self.PROP_OUT_LATTICE_PARAMS, lattice_params) + + def _set_pawley_phase_parameters(self, phase): + # Note from GSAS-II doc: "you probably should clear the Histogram scale factor refinement + # flag (found in Sample parameters for the powder data set) as it cannot be refined + # simultaneously with the Pawley reflection intensities" + phase.values()[2].values()[0]["Scale"] = [1.0, False] + + phase_params = phase.values()[4] + phase_params["doPawley"] = True + + pawley_dmin = self.getPropertyValue(self.PROP_PAWLEY_DMIN) + phase_params["Pawley dmin"] = pawley_dmin + + pawley_neg_wt = self.getPropertyValue(self.PROP_PAWLEY_NEGATIVE_WEIGHT) + phase_params["Pawley neg wt"] = pawley_neg_wt + + @contextmanager + def _suppress_stdout(self): + """ + Suppress output from print statements. This is mainly useful for debugging, as GSAS does a lot of printing. 
+ """ + if self.getProperty(self.PROP_SUPPRESS_GSAS_OUTPUT).value: + self.log().information("Suppressing stdout") + with open(os.devnull, "w") as devnull: + old_stdout = sys.stdout + sys.stdout = devnull + try: + yield + finally: + sys.stdout = old_stdout else: - sub = GSASIIgrid.GetPatternTreeItemId(gs2, gs2.root, 'Phases') - - psub = gs2.PatternTree.AppendItem(parent=sub, text=phase_name) - gs2.PatternTree.SetItemPyData(psub, phase_reader.Phase) - - # Connect the phase information to the histogram data - sub = GSASIIgrid.GetPatternTreeItemId(gs2, gs2.root, 'Phases') - item, cookie = gs2.PatternTree.GetFirstChild(sub) - phase_name = gs2.PatternTree.GetItemText(item) - self.log().debug("Connecting phase information (name {0} to histogram data, with item: {1}, " - "cookie: {2}".format(phase_name, item, cookie)) - # the histo data is in for example 'PWDR ENGINX_ceria_1000_spectrum-0.txt' - phase_data = gs2.PatternTree.GetItemPyData(item) - - self._setup_additional_phase_data(gs2_rd.idstring, phase_data) - - return phase_data - - def _setup_additional_phase_data(self, powder_histo_name, phase_data): - """ - Setup more phase data parameters in 'Phases' / 'General' and - 'Phases' / 'Histograms'. 
- - @param phase_data :: from GSAS-II, the first entry in 'Phases' - """ - import GSASII - import GSASIIspc - - SGData = phase_data['General']['SGData'] - use_list = phase_data['Histograms'] - NShkl = len(GSASIIspc.MustrainNames(SGData)) - NDij = len(GSASIIspc.HStrainNames(SGData)) - # like 'PWDR ENGINX_ceria_1000_spectrum-0.txt' - histo_name = 'PWDR ' + powder_histo_name - # 'Reflection Lists' is not defined at this point: - # item_id = GSASIIgrid.GetPatternTreeItemId(gs2, gs2.root, histo_name) - # refList = gs2.PatternTree.GetItemPyData( - # GSASIIgrid.GetPatternTreeItemId(gs2, item_id, 'Reflection Lists')) - # refList[general_phase_data['Name']] = {} - use_list[histo_name] = GSASII.SetDefaultDData('PWDR', histo_name, NShkl=NShkl, NDij=NDij) - - def _produce_outputs(self, gof_estimates, lattice_params, parm_dict): - (result_rwp, result_gof, _result_chisq) = gof_estimates - self.setProperty(self.PROP_OUT_RWP, result_rwp) - self.setProperty(self.PROP_OUT_GOF, result_gof) - - self._build_output_table(parm_dict, self.PROP_OUT_FITTED_PARAMS) - self._build_output_lattice_table(lattice_params, self.PROP_OUT_LATTICE_PARAMS) - - def _build_output_table(self, parm_dict, tbl_prop_name): - tbl_name = self.getPropertyValue(tbl_prop_name) - if not tbl_name: - return - - par_names = ['Center', 'Intensity', 'Alpha', 'Beta', 'Sigma', 'Gamma'] - par_prefixes = ['pos','int','alp','bet','sig','gam'] - table = msapi.CreateEmptyTableWorkspace(OutputWorkspace=tbl_name) - - num_peaks = 0 - while par_prefixes[0] + str(num_peaks) in parm_dict: - num_peaks += 1 - - for name in par_names: - table.addColumn('double', name) - - for idx in range(0, num_peaks): - par_values = [ parm_dict[par_prefix + str(idx)] for par_prefix in par_prefixes] - print("par_values: ", par_values) - table.addRow(par_values) - - for parm in parm_dict: - self.log().debug("Parameters for output table: {0}".format(parm)) - - def _build_output_lattice_table(self, lattice_params, tbl_prop_name): - tbl_name = 
self.getPropertyValue(tbl_prop_name) - if not tbl_name: - return - - alg = self.createChildAlgorithm('CreateEmptyTableWorkspace') - alg.execute() - table = alg.getProperty('OutputWorkspace').value - self.setProperty(tbl_prop_name, table) - - table.addColumn('double', 'a') - table.addColumn('double', 'b') - table.addColumn('double', 'c') - table.addColumn('double', 'alpha') - table.addColumn('double', 'beta') - table.addColumn('double', 'gamma') - table.addColumn('double', 'volume') - - table.addRow([float(par) for par in lattice_params]) - - def _save_project_read_lattice(self, gs2, gs2_rd): - """ - To save the project at the very end, and parse lattice params from the output .lst - file from GSAS. - - @param gs2 :: the main GSAS-II object - @param gs2_rd :: the GSAS-II "rd" object with powder data in it - """ - out_proj_file = self.getProperty(self.PROP_OUT_PROJECT_FILE).value - if out_proj_file: - # Not totally sure if this save will leave less information in the output - # file as compared with the save done from the core refine routines. - # those routines save information in the project file that is apparently not - # updated in the project tree (other than loading the saved project file). - self._save_gsas2_project(gs2, gs2_rd, out_proj_file) - try: - file_lattice_params = self._parse_lattice_params_refined(out_proj_file) - self.log().notice("Lattice parameters found in output file: {0}".format(file_lattice_params)) - except IOError: - self.log().notice("The output project lst file was not found for this project: {0}". - format(out_proj_file)) - - def _parse_lattice_params_refined(self, out_proj_file): - """ - Parses lattice parameters from the output .lst file (refinement results) - corresponding to the project file given as input - - @param out_proj_file : GSAS-II project file name - - @Returns a tuple with the lattice parameter values refined. 
7 parameters: - (a, b, c, alpha, beta, gamma, Volume) - """ - import os - import re - lst_filename = os.path.splitext(out_proj_file)[0] + '.lst' - with open(lst_filename) as results_file: - results_lst = results_file.read() - - re_lattice_params = (r"Unit\s+cell:\s+a\s+=\s+(\d+.\d+)\s+b\s+=\s+(\d+.\d+)\s+c\s+=\s+(\d+.\d+)" - r"\s+alpha\s+=\s+(\d+.\d+)\s+beta\s+=\s+(\d+.\d+)\s+gamma\s+=\s+(\d+.\d+)" - r"\s+volume\s+=\s+(\d+.\d+)") - pattern = re.compile(re_lattice_params) - lines_match = pattern.findall(results_lst) - - # Depending on what refinement options are enabled the cell info is produced in different - # places and formats. Alternatively look for something like - # values: 2.470000 2.470000 6.790000 90.0000 90.0000 120.0000 35.875 - if not lines_match: - more_re_lattice_params = (r"\s+values:\s+(\d+.\d+)\s+(\d+.\d+)\s+(\d+.\d+)\s+(\d+.\d+)" - r"\s+(\d+.\d+)\s+(\d+.\d+)\s+(\d+.\d+)") - pattern = re.compile(more_re_lattice_params) - lines_match = pattern.findall(results_lst) - - params = lines_match[0] - - return params - - def _save_lattice_params_file(self, _gs2, out_lattice_file): - (latt_a, latt_b, latt_c, latt_alpha, latt_beta, latt_gamma) = 6*[0] - # To grab parameters from the gs2 object: - # G2gd.GetPatternTreeItemId(self,self.root,'Phases') - with open(out_lattice_file, 'w') as lattice_txt: - print("a, b, c, alpha, beta, gamma", file=lattice_txt) - print(("{0}, {1}, {2}, {3}, {4}, {5}". - format(latt_a, latt_b, latt_c, latt_alpha, latt_beta, latt_gamma)), file=lattice_txt) - - def _prepare_save_gsas2_project(self, gs2, gs2_rd): - """ - GSAS-II projects are saved/loaded from/to the tree of the main window. - This populates the GSAS-II GUI tree, getting it ready for saving. - - It needs to save at least all the elements save here even if we are not - using effectively (for example 'Sample Parameters'). Otherwise the code - will fail in various places (for example at the end of DoPeakFit). 
- This is based on GSASII.OnDummyPowder, GSASII.GetPWDRdatafromTree, - GSASII.OnImportPowder - - Assumes that the (two) instrument parameter objects have been added in - gs2_rd.pwdparms['Instrument Parameters'] - that the limits have been initialized in - gs2_rd.pwdparms['Limits'] - and that the background has been initialized in - gs2_rd.pwdparms['Background'] - as it would be done in GSAS-II (sometimes). - - @param gs2 :: the main GSAS-II object - @param gs2_rd :: the GSAS-II "rd" object with powder data - """ - - import random - import sys - import GSASIIgrid - - # set GUI tree items - histo_name = 'PWDR ' + gs2_rd.idstring - tree_id = gs2.PatternTree.AppendItem(parent=gs2.root, text=histo_name) - valuesdict = { - 'wtFactor':1.0, - 'Dummy':True, - 'ranId':random.randint(0,sys.maxsize), - 'Offset':[0.0,0.0],'delOffset':0.02,'refOffset':-1.0,'refDelt':0.01, - 'qPlot':False,'dPlot':False,'sqrtPlot':False - } - # Warning: with the comment "this should be removed someday" - gs2_rd.Sample['ranId'] = valuesdict['ranId'] - gs2.PatternTree.SetItemPyData(tree_id, [valuesdict, gs2_rd.powderdata]) - - gs2.PatternTree.SetItemPyData( - gs2.PatternTree.AppendItem(tree_id, text='Comments'), - gs2_rd.comments) - - gs2.PatternTree.SetItemPyData( - gs2.PatternTree.AppendItem(tree_id, text='Limits'), - [gs2_rd.pwdparms['Limits'], [gs2_rd.powderdata[0].min(), - gs2_rd.powderdata[0].max()]]) - gs2.PatternId = GSASIIgrid.GetPatternTreeItemId(gs2, tree_id, 'Limits') - - gs2.PatternTree.SetItemPyData( - gs2.PatternTree.AppendItem(tree_id, text='Background'), - gs2_rd.pwdparms['Background']) - - gs2.PatternTree.SetItemPyData( - gs2.PatternTree.AppendItem(tree_id, text='Instrument Parameters'), - gs2_rd.pwdparms['Instrument Parameters']) - - gs2.PatternTree.SetItemPyData( - gs2.PatternTree.AppendItem(tree_id, text='Sample Parameters'), - gs2_rd.Sample) - - gs2.PatternTree.SetItemPyData( - gs2.PatternTree.AppendItem(tree_id, text='Peak List') - ,{'peaks':[],'sigDict':{}}) - - 
gs2.PatternTree.SetItemPyData( - gs2.PatternTree.AppendItem(tree_id, text='Index Peak List'), - [[],[]]) - gs2.PatternTree.SetItemPyData( - gs2.PatternTree.AppendItem(tree_id, text='Unit Cells List'), - []) - gs2.PatternTree.SetItemPyData( - gs2.PatternTree.AppendItem(tree_id, text='Reflection Lists'), - {}) - - def _save_gsas2_project(self, gsas2, gs2_rd, proj_filename): - """ - Saves all the information loaded into a GSAS-II project file that can be loaded - in the GSAS-II GUI (.gpx files). - - @param gsas2 :: the main GSAS-II object - @param gs2_rd :: the GSAS-II "rd" object with powder data - @param proj_filename :: name of the output project file - """ - self.log().notice("Preparing GSAS-II project tree to save into: {0}".format(proj_filename)) - self._prepare_save_gsas2_project(gsas2, gs2_rd) - - import GSASIIIO - self.log().debug("Saving GSAS-II project: {0}".format(gsas2)) - gsas2.GSASprojectfile = proj_filename - gsas2.CheckNotebook() - GSASIIIO.ProjFileSave(gsas2) - -# Need GSAS-II _init_Imports() -#pylint: disable=protected-access - def _init_histo_data_readers(self, gs2): - gs2._init_Imports() - readers_list = gs2.ImportPowderReaderlist - return readers_list + yield AlgorithmFactory.subscribe(GSASIIRefineFitPeaks) diff --git a/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py b/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py index 808279da324be743fd146c908f9156ba3c199a83..803bde4c85c0ecd3d03259a19f88c0550c31f648 100644 --- a/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py +++ b/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py @@ -237,7 +237,7 @@ class LoadDNSLegacy(PythonAlgorithm): # add other sample logs logs["names"].extend(["deterota", "mon_sum", "duration", "huber", "omega", "T1", "T2", "Tsp"]) - logs["values"].extend([metadata.deterota, metadata.monitor_counts, metadata.duration, + logs["values"].extend([metadata.deterota, float(metadata.monitor_counts), metadata.duration, metadata.huber, 
metadata.huber - metadata.deterota, metadata.temp1, metadata.temp2, metadata.tsp]) logs["units"].extend(["Degrees", "Counts", "Seconds", "Degrees", "Degrees", "K", "K", "K"]) diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/GSASIIRefineFitPeaksTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/GSASIIRefineFitPeaksTest.py index 2c84544eb84e86d30309ba3daead754e498c9128..0414faffd92cbbe255b1074809f8a3429af384e1 100644 --- a/Framework/PythonInterface/test/python/plugins/algorithms/GSASIIRefineFitPeaksTest.py +++ b/Framework/PythonInterface/test/python/plugins/algorithms/GSASIIRefineFitPeaksTest.py @@ -1,67 +1,136 @@ from __future__ import (absolute_import, division, print_function) +import os +import tempfile import unittest -from mantid.simpleapi import * +import mantid.simpleapi as mantid from mantid.api import * -class GSASIIRefineFitPeaksTest(unittest.TestCase): + +class _GSASFinder(object): """ - Very limited test, as executing this algorithm requires a modified - version of GSASII. At least it does some check that the algorithm is - registered succesfully and basic sanity checks. + Helper class for unit test - the algorithm can't run without a version of GSAS-II that includes the module + GSASIIscriptable (added April 2017) """ + @staticmethod + def _find_directory_by_name(cur_dir_name, cur_dir_path, name_to_find, level, max_level): + """ + Perform a depth-limited depth-first search to try and find a directory with a given name + """ + if level == max_level: + return None + + if cur_dir_name == name_to_find: + return cur_dir_path + + list_dir = os.listdir(cur_dir_path) + for child in list_dir: + child_path = os.path.join(cur_dir_path, child) + if os.path.isdir(child_path): + try: + path = _GSASFinder._find_directory_by_name(cur_dir_name=child, cur_dir_path=child_path, + level=level + 1, name_to_find=name_to_find, + max_level=max_level) + except OSError: # Probably "Permission denied". 
Either way, just ignore it + pass + else: + if path is not None: + return path - def test_wrong_properties(self): + @staticmethod + def _path_to_g2conda(): """ - Handle in/out property issues appropriately. + Find the g2conda directory (where GSAS-II normally sits), as long as it exists less than 2 levels away from + the root directory """ - ws_name = 'out_ws' - peak = "name=BackToBackExponential, I=5000,A=1, B=1., X0=10000, S=150" - sws = CreateSampleWorkspace(Function="User Defined", UserDefinedFunction=peak, - NumBanks=1, BankPixelWidth=1, XMin=5000, XMax=30000, - BinWidth=5, OutputWorkspace=ws_name) - instr_filename = 'inexistent_instr_par_file' - - # No InputWorkspace property (required) - self.assertRaises(RuntimeError, - GSASIIRefineFitPeaks, - WorkspaceIndex=0, ExpectedPeaks='1.2, 3.1') - - # Wrong WorkspaceIndex value - self.assertRaises(RuntimeError, - GSASIIRefineFitPeaks, - InputWorkspace=ws_name, - WorkspaceIndex=-3) - - # Wrong property - self.assertRaises(RuntimeError, - GSASIIRefineFitPeaks, - InputWorkspace=ws_name, BankPixelFoo=33, - WorkspaceIndex=0) - - # missing instrument file property - self.assertRaises(RuntimeError, - GSASIIRefineFitPeaks, - InputWorkspace=ws_name, - WorkspaceIndex=0) - - # Wrong InstrumentFile property name - self.assertRaises(ValueError, - GSASIIRefineFitPeaks, - InputWorkspace=ws_name, - InstruMentFile=instr_filename, - WorkspaceIndex=0, ExpectedPeaks='a') - - # Missing file for InstrumentFile - self.assertRaises(ValueError, - GSASIIRefineFitPeaks, - InputWorkspace=ws_name, - InstrumentFile=instr_filename, - WorkspaceIndex=0, ExpectedPeaks='a') - - def test_exec_import_fails(self): - pass + root_directory = os.path.abspath(os.sep) + return _GSASFinder._find_directory_by_name(cur_dir_path=root_directory, cur_dir_name=root_directory, level=0, + name_to_find="g2conda", max_level=2) + + @staticmethod + def GSASIIscriptable_location(): + """ + Find the path to GSASIIscriptable.py, if it exists and is less than 2 levels away 
from the root directory + """ + path_to_g2conda = _GSASFinder._path_to_g2conda() + if path_to_g2conda is None: + return None + + path_to_gsasii_scriptable = os.path.join(path_to_g2conda, "GSASII") + if os.path.isfile(os.path.join(path_to_gsasii_scriptable, "GSASIIscriptable.py")): + return path_to_gsasii_scriptable + + return None + + +class GSASIIRefineFitPeaksTest(unittest.TestCase): + + _path_to_gsas = None + _gsas_proj = None + _input_ws = None + _phase_file = None + _inst_file = None + + def setUp(self): + data_dir = mantid.config["datasearch.directories"].split(";")[0] + print(mantid.config["datasearch.directories"].split(";")) + + self._phase_file = os.path.join(data_dir, "FE_ALPHA.cif") + self._inst_file = os.path.join(data_dir, "template_ENGINX_241391_236516_North_bank.prm") + + self._path_to_gsas = _GSASFinder.GSASIIscriptable_location() + if self._path_to_gsas is None: + self.skipTest("Could not find GSASIIscriptable.py") + + temp_dir = tempfile.gettempdir() + self._gsas_proj = os.path.join(temp_dir, "GSASIIRefineFitPeaksTest.gpx") + + spectrum_file = os.path.join(data_dir, "focused_bank1_ENGINX00256663.nxs") + self._input_ws = mantid.Load(Filename=spectrum_file, OutputWorkspace="input") + + def tearDown(self): + if os.path.isfile(self._gsas_proj): + os.remove(self._gsas_proj) + + def test_rietveld_refinement_with_default_params(self): + gof, rwp, lattice_table = mantid.GSASIIRefineFitPeaks(RefinementMethod="Rietveld refinement", + InputWorkspace=self._input_ws, + PhaseInfoFile=self._phase_file, + InstrumentFile=self._inst_file, + PathToGSASII=self._path_to_gsas, + SaveGSASIIProjectFile=self._gsas_proj, + MuteGSASII=True) + + self.assertAlmostEqual(gof, 3.57776, delta=1e-6) + self.assertAlmostEquals(rwp, 77.75499, delta=1e6) + row = lattice_table.row(0) + self.assertAlmostEqual(row["a"], 2.8665) + self.assertAlmostEqual(row["b"], 2.8665) + self.assertAlmostEqual(row["c"], 2.8665) + self.assertAlmostEqual(row["alpha"], 90) + 
self.assertAlmostEqual(row["beta"], 90) + self.assertAlmostEqual(row["gamma"], 90) + self.assertAlmostEqual(row["volume"], 23.554, delta=1e4) + + def test_pawley_refinement_with_default_params(self): + gof, rwp, lattice_table = mantid.GSASIIRefineFitPeaks(RefinementMethod="Pawley refinement", + InputWorkspace=self._input_ws, + PhaseInfoFile=self._phase_file, + InstrumentFile=self._inst_file, + PathToGSASII=self._path_to_gsas, + SaveGSASIIProjectFile=self._gsas_proj, + MuteGSASII=True) + self.assertAlmostEqual(gof, 3.57847, delta=1e-6) + self.assertAlmostEquals(rwp, 77.75515, delta=1e6) + row = lattice_table.row(0) + self.assertAlmostEqual(row["a"], 2.8665) + self.assertAlmostEqual(row["b"], 2.8665) + self.assertAlmostEqual(row["c"], 2.8665) + self.assertAlmostEqual(row["alpha"], 90) + self.assertAlmostEqual(row["beta"], 90) + self.assertAlmostEqual(row["gamma"], 90) + self.assertAlmostEqual(row["volume"], 23.554, delta=1e4) if __name__ == '__main__': unittest.main() diff --git a/Framework/PythonInterface/test/testhelpers/testrunner.py b/Framework/PythonInterface/test/testhelpers/testrunner.py index c5d6d0647139ea7457bf4e76caa06530ad941ebc..2f44d399f31bba493980dbc438238f21b5eb1df4 100644 --- a/Framework/PythonInterface/test/testhelpers/testrunner.py +++ b/Framework/PythonInterface/test/testhelpers/testrunner.py @@ -8,10 +8,10 @@ from __future__ import (absolute_import, division, print_function) import imp import os import sys -from xmlrunner import XMLTestRunner -from xmlrunner.result import _TestInfo, _XMLTestResult, safe_unicode import unittest +from xmlrunner import XMLTestRunner +from xmlrunner.result import _TestInfo, _XMLTestResult, safe_unicode class GroupedNameTestInfo(_TestInfo): """ @@ -128,4 +128,8 @@ def result_class(pathname): if __name__ == "__main__": + # Import mantid so that it sets up the additional paths to scripts etc + # It would be good to try & remove this to soften the impact on tests + # that don't require importing mantid at all + import 
mantid # noqa main(sys.argv) diff --git a/Framework/TestHelpers/inc/MantidTestHelpers/FakeObjects.h b/Framework/TestHelpers/inc/MantidTestHelpers/FakeObjects.h index 981e8b10e3ffc36189178d57a81365c629dcab70..14b0eebabb29782941ff494c58c9a9b43df5cb5f 100644 --- a/Framework/TestHelpers/inc/MantidTestHelpers/FakeObjects.h +++ b/Framework/TestHelpers/inc/MantidTestHelpers/FakeObjects.h @@ -54,6 +54,10 @@ public: m_histogram.setCountStandardDeviations(0); } + void copyDataFrom(const ISpectrum &other) override { + other.copyDataInto(*this); + } + void setX(const Mantid::Kernel::cow_ptr<Mantid::HistogramData::HistogramX> &X) override { m_histogram.setX(X); @@ -93,6 +97,11 @@ protected: Mantid::HistogramData::Histogram m_histogram; private: + using ISpectrum::copyDataInto; + void copyDataInto(SpectrumTester &other) const override { + other.m_histogram = m_histogram; + } + const Mantid::HistogramData::Histogram &histogramRef() const override { return m_histogram; } diff --git a/Framework/TestHelpers/src/ParallelRunner.cpp b/Framework/TestHelpers/src/ParallelRunner.cpp index f28bd2f31829e98812e96701cbf11f8595e1edc0..a09c412f1a8a0878f0854884ee722105d42f70c1 100644 --- a/Framework/TestHelpers/src/ParallelRunner.cpp +++ b/Framework/TestHelpers/src/ParallelRunner.cpp @@ -28,7 +28,8 @@ ParallelRunner::ParallelRunner(const int threads) { if (comm.size() != 1 && comm.size() != threads) throw("ParallelRunner: number of requested threads does not match number " "of MPI ranks"); - m_backend = boost::make_shared<detail::ThreadingBackend>(threads); + if (comm.size() == 1) + m_backend = boost::make_shared<detail::ThreadingBackend>(threads); m_serialBackend = boost::make_shared<detail::ThreadingBackend>(1); } diff --git a/Framework/Types/inc/MantidTypes/Core/DateAndTime.h b/Framework/Types/inc/MantidTypes/Core/DateAndTime.h index ca83e247a12bb1d96ea462ad0a08e78ea88f32de..ab43475508049d7cfdc910615ae57fc544f81bd3 100644 --- a/Framework/Types/inc/MantidTypes/Core/DateAndTime.h +++ 
b/Framework/Types/inc/MantidTypes/Core/DateAndTime.h @@ -147,10 +147,66 @@ public: private: /// A signed 64-bit int of the # of nanoseconds since Jan 1, 1990. int64_t _nanoseconds; + + /// Max allowed nanoseconds in the time; 2^62-1 + static constexpr int64_t MAX_NANOSECONDS = 4611686018427387903LL; + + /// Min allowed nanoseconds in the time; -2^62+1 + static constexpr int64_t MIN_NANOSECONDS = -4611686018427387903LL; }; #pragma pack(pop) + +/** Default, empty constructor */ +inline DateAndTime::DateAndTime() : _nanoseconds(0) {} + +/** Construct a date from nanoseconds. + * @param total_nanoseconds :: nanoseconds since Jan 1, 1990 (our epoch). + */ +inline DateAndTime::DateAndTime(const int64_t total_nanoseconds) { + // Make sure that you cannot construct a date that is beyond the limits... + if (total_nanoseconds > MAX_NANOSECONDS) + _nanoseconds = MAX_NANOSECONDS; + else if (total_nanoseconds < MIN_NANOSECONDS) + _nanoseconds = MIN_NANOSECONDS; + else + _nanoseconds = total_nanoseconds; +} + +/** + operator to add time. + * @param nanosec :: number of nanoseconds to add + * @return modified DateAndTime. + */ +inline DateAndTime DateAndTime::operator+(const int64_t nanosec) const { + return DateAndTime(_nanoseconds + nanosec); +} + +/** + operator to add time. + * @param sec :: duration to add + * @return modified DateAndTime. 
+ */ +inline DateAndTime DateAndTime::operator+(const double sec) const { + return this->operator+(nanosecondsFromSeconds(sec)); +} + +/** Nanoseconds from seconds, with limits + * @param sec :: duration in seconds, as a double + * @return int64 of the number of nanoseconds + */ +inline int64_t DateAndTime::nanosecondsFromSeconds(double sec) { + const double nano = sec * 1e9; + constexpr double minimum = static_cast<double>(MIN_NANOSECONDS); + constexpr double maximum = static_cast<double>(MAX_NANOSECONDS); + // Use these limits to avoid integer overflows + if (nano > maximum) + return MAX_NANOSECONDS; + else if (nano < minimum) + return MIN_NANOSECONDS; + else + return int64_t(nano); +} + } // namespace Core } // namespace Types } // namespace Mantid -#endif // MANTID_TYPES_DATE_AND_TIME_H \ No newline at end of file +#endif // MANTID_TYPES_DATE_AND_TIME_H diff --git a/Framework/Types/inc/MantidTypes/Event/TofEvent.h b/Framework/Types/inc/MantidTypes/Event/TofEvent.h index 3c9d7f6c0d0bfbb5387083b7b253d8af27638906..b997d0525365490fd9ed95eeede78b8a833e4ef5 100644 --- a/Framework/Types/inc/MantidTypes/Event/TofEvent.h +++ b/Framework/Types/inc/MantidTypes/Event/TofEvent.h @@ -15,7 +15,6 @@ class LoadEventNexus; } namespace Types { namespace Event { -//========================================================================================== /** Info about a single neutron detection event: * * - the time of flight of the neutron (can be converted to other units) @@ -82,9 +81,20 @@ public: }; #pragma pack(pop) -//========================================================================================== -// TofEvent inlined member function definitions -//========================================================================================== +/** Constructor, specifying the time of flight only + * @param tof :: time of flight, in microseconds + */ +inline TofEvent::TofEvent(const double tof) : m_tof(tof), m_pulsetime(0) {} + +/** Constructor, specifying the time of 
flight and the frame id + * @param tof :: time of flight, in microseconds + * @param pulsetime :: absolute pulse time of the neutron. + */ +inline TofEvent::TofEvent(const double tof, const Core::DateAndTime pulsetime) + : m_tof(tof), m_pulsetime(pulsetime) {} + +/// Empty constructor +inline TofEvent::TofEvent() : m_tof(0), m_pulsetime(0) {} /** () operator: return the tof (X value) of the event. * This is useful for std operations like comparisons and std::lower_bound @@ -112,4 +122,4 @@ inline double TofEvent::errorSquared() const { return 1.0; } } // namespace Event } // namespace Types } // namespace Mantid -#endif // MANTID_TYPES_TOFEVENT_H \ No newline at end of file +#endif // MANTID_TYPES_TOFEVENT_H diff --git a/Framework/Types/inc/MantidTypes/SpectrumDefinition.h b/Framework/Types/inc/MantidTypes/SpectrumDefinition.h index b4445e88dbd22802ee4367ef5863bbdb4c836401..4276b2ef7431ff33af7e87e4d9bff485e971cc77 100644 --- a/Framework/Types/inc/MantidTypes/SpectrumDefinition.h +++ b/Framework/Types/inc/MantidTypes/SpectrumDefinition.h @@ -41,6 +41,10 @@ namespace Mantid { */ class SpectrumDefinition { public: + SpectrumDefinition() = default; + explicit SpectrumDefinition(const size_t detectorIndex, + const size_t timeIndex = 0) + : m_data{{detectorIndex, timeIndex}} {} /// Returns the size of the SpectrumDefinition, i.e., the number of detectors /// (or rather detector positions) that the spectrum comprises. 
size_t size() const { return m_data.size(); } diff --git a/Framework/Types/src/Core/DateAndTime.cpp b/Framework/Types/src/Core/DateAndTime.cpp index c249f820006af939c4e61cd9ef64f6a241ade3a6..f421c9b045fbf94df9f6888dac2774d9d3ea4677 100644 --- a/Framework/Types/src/Core/DateAndTime.cpp +++ b/Framework/Types/src/Core/DateAndTime.cpp @@ -16,15 +16,9 @@ const time_duration DateAndTime::ONE_SECOND = boost::posix_time::time_duration(0, 0, 1, 0); namespace { -/// Max allowed nanoseconds in the time; 2^62-1 -const int64_t MAX_NANOSECONDS = 4611686018427387903LL; - /// Max allowed seconds in the time const int64_t MAX_SECONDS = 4611686017LL; -/// Min allowed nanoseconds in the time; -2^62+1 -const int64_t MIN_NANOSECONDS = -4611686018427387903LL; - /// Min allowed seconds in the time const int64_t MIN_SECONDS = -4611686017LL; @@ -123,24 +117,6 @@ time_t DateAndTime::utc_mktime(struct tm *utctime) { return result; } -//------------------------------------------------------------------------------------------------ -/** Default, empty constructor */ -DateAndTime::DateAndTime() : _nanoseconds(0) {} - -//------------------------------------------------------------------------------------------------ -/** Construct a date from nanoseconds. - * @param total_nanoseconds :: nanoseconds since Jan 1, 1990 (our epoch). - */ -DateAndTime::DateAndTime(const int64_t total_nanoseconds) { - // Make sure that you cannot construct a date that is beyond the limits... 
- if (total_nanoseconds > MAX_NANOSECONDS) - _nanoseconds = MAX_NANOSECONDS; - else if (total_nanoseconds < MIN_NANOSECONDS) - _nanoseconds = MIN_NANOSECONDS; - else - _nanoseconds = total_nanoseconds; -} - //------------------------------------------------------------------------------------------------ /** Construct a time from an ISO8601 string * @@ -612,14 +588,6 @@ bool DateAndTime::operator>=(const DateAndTime &rhs) const { } //------------------------------------------------------------------------------------------------ -/** + operator to add time. - * @param nanosec :: number of nanoseconds to add - * @return modified DateAndTime. - */ -DateAndTime DateAndTime::operator+(const int64_t nanosec) const { - return DateAndTime(_nanoseconds + nanosec); -} - /** += operator to add time. * @param nanosec :: number of nanoseconds to add * @return modified DateAndTime. @@ -688,13 +656,6 @@ DateAndTime &DateAndTime::operator-=(const time_duration &td) { } //------------------------------------------------------------------------------------------------ -/** + operator to add time. - * @param sec :: duration to add - * @return modified DateAndTime. - */ -DateAndTime DateAndTime::operator+(const double sec) const { - return this->operator+(nanosecondsFromSeconds(sec)); -} /** += operator to add time. 
* @param sec :: duration to add @@ -819,24 +780,6 @@ time_duration DateAndTime::durationFromNanoseconds(int64_t dur) { #endif } -//----------------------------------------------------------------------------------------------- -/** Nanoseconds from seconds, with limits - * @param sec :: duration in seconds, as a double - * @return int64 of the number of nanoseconds - */ -int64_t DateAndTime::nanosecondsFromSeconds(double sec) { - const double nano = sec * 1e9; - constexpr double minimum = static_cast<double>(MIN_NANOSECONDS); - constexpr double maximum = static_cast<double>(MAX_NANOSECONDS); - // Use these limits to avoid integer overflows - if (nano > maximum) - return MAX_NANOSECONDS; - else if (nano < minimum) - return MIN_NANOSECONDS; - else - return int64_t(nano); -} - //----------------------------------------------------------------------------------------------- /** Static method to create a vector of DateAndTime objects * using a start time and seconds offset. To speed things up, diff --git a/Framework/Types/src/Event/TofEvent.cpp b/Framework/Types/src/Event/TofEvent.cpp index 723b7d67d41006752bca50e67f9d61d4dcf6c04e..ce0ee8e761bc2a7153098fc6cf3e392051aea1ee 100644 --- a/Framework/Types/src/Event/TofEvent.cpp +++ b/Framework/Types/src/Event/TofEvent.cpp @@ -6,24 +6,6 @@ namespace Mantid { namespace Types { using Core::DateAndTime; namespace Event { -//========================================================================== -/// --------------------- TofEvent stuff ---------------------------------- -//========================================================================== -/** Constructor, specifying the time of flight only - * @param tof :: time of flight, in microseconds - */ -TofEvent::TofEvent(const double tof) : m_tof(tof), m_pulsetime(0) {} - -/** Constructor, specifying the time of flight and the frame id - * @param tof :: time of flight, in microseconds - * @param pulsetime :: absolute pulse time of the neutron. 
- */ -TofEvent::TofEvent(const double tof, const DateAndTime pulsetime) - : m_tof(tof), m_pulsetime(pulsetime) {} - -/// Empty constructor -TofEvent::TofEvent() : m_tof(0), m_pulsetime(0) {} - /** Comparison operator. * @param rhs: the other TofEvent to compare. * @return true if the TofEvent's are identical.*/ @@ -81,4 +63,4 @@ ostream &operator<<(ostream &os, const TofEvent &event) { } } // namespace Event } // namespace Types -} // namespace Mantid \ No newline at end of file +} // namespace Mantid diff --git a/Framework/Types/test/SpectrumDefinitionTest.h b/Framework/Types/test/SpectrumDefinitionTest.h index c8824414ee09901710ceb74d1fba1d3b2e656f4e..55c6b873d0ac98a521b3c7a82a5947a1f72fcfb4 100644 --- a/Framework/Types/test/SpectrumDefinitionTest.h +++ b/Framework/Types/test/SpectrumDefinitionTest.h @@ -16,6 +16,23 @@ public: } static void destroySuite(SpectrumDefinitionTest *suite) { delete suite; } + void test_default_construct() { + SpectrumDefinition def; + TS_ASSERT_EQUALS(def.size(), 0); + } + + void test_construct_no_time() { + SpectrumDefinition def(42); + TS_ASSERT_EQUALS(def.size(), 1); + TS_ASSERT_EQUALS(def[0], (std::pair<size_t, size_t>(42, 0))); + } + + void test_construct() { + SpectrumDefinition def(42, 7); + TS_ASSERT_EQUALS(def.size(), 1); + TS_ASSERT_EQUALS(def[0], (std::pair<size_t, size_t>(42, 7))); + } + void test_size() { SpectrumDefinition def; TS_ASSERT_EQUALS(def.size(), 0); diff --git a/MantidPlot/CMakeLists.txt b/MantidPlot/CMakeLists.txt index c7567758772821a19898000ac909536187c63749..e6517ffbf7693059adcd26e79bbfda79de158722 100644 --- a/MantidPlot/CMakeLists.txt +++ b/MantidPlot/CMakeLists.txt @@ -824,6 +824,8 @@ if (OSX_VERSION VERSION_GREATER 10.8) set_target_properties(MantidPlot PROPERTIES INSTALL_RPATH "@executable_path;@executable_path/../Libraries") endif () +set_target_properties ( MantidPlot PROPERTIES FOLDER "Qt4" ) + ########################################################################### # Custom Info.plist file for 
OS X ########################################################################### diff --git a/Testing/Data/SystemTest/FE_ALPHA.cif.md5 b/Testing/Data/SystemTest/FE_ALPHA.cif.md5 new file mode 100644 index 0000000000000000000000000000000000000000..e004f2c6113ee3505066af65c9c97cc926d69675 --- /dev/null +++ b/Testing/Data/SystemTest/FE_ALPHA.cif.md5 @@ -0,0 +1 @@ +b184c411cf657178803326c733b2a34c \ No newline at end of file diff --git a/Testing/Data/SystemTest/focused_bank1_ENGINX00256663.nxs.md5 b/Testing/Data/SystemTest/focused_bank1_ENGINX00256663.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..573dd5a42d921c01a79427ad89778533d579d91f --- /dev/null +++ b/Testing/Data/SystemTest/focused_bank1_ENGINX00256663.nxs.md5 @@ -0,0 +1 @@ +5e52c1fa6927d04649b6456e70a07999 \ No newline at end of file diff --git a/Testing/Data/SystemTest/template_ENGINX_241391_236516_North_bank.prm.md5 b/Testing/Data/SystemTest/template_ENGINX_241391_236516_North_bank.prm.md5 new file mode 100644 index 0000000000000000000000000000000000000000..33cf4f9fbeedac4ce7adcb9a9b62e6070e9cf78e --- /dev/null +++ b/Testing/Data/SystemTest/template_ENGINX_241391_236516_North_bank.prm.md5 @@ -0,0 +1 @@ +093020d8461c62709297603faf712449 \ No newline at end of file diff --git a/buildconfig/CMake/Bootstrap.cmake b/buildconfig/CMake/Bootstrap.cmake index 6027149ee4ae214be7acd6eb77c1fee16185146a..d927789189a04c9ed68379f9a25e0b25789bd047 100644 --- a/buildconfig/CMake/Bootstrap.cmake +++ b/buildconfig/CMake/Bootstrap.cmake @@ -10,7 +10,7 @@ if( MSVC ) include ( ExternalProject ) set( EXTERNAL_ROOT ${PROJECT_SOURCE_DIR}/external CACHE PATH "Location to clone third party dependencies to" ) set( THIRD_PARTY_GIT_URL "https://github.com/mantidproject/thirdparty-msvc2015.git" ) - set ( THIRD_PARTY_GIT_SHA1 9af2288ba00e184b8659edbeedb12ce771b87bb5 ) + set ( THIRD_PARTY_GIT_SHA1 ef72b6d824ff2df21cca80c87b565136cc4020aa ) set ( THIRD_PARTY_DIR ${EXTERNAL_ROOT}/src/ThirdParty ) # Generates a 
script to do the clone/update in tmp set ( _project_name ThirdParty ) diff --git a/buildconfig/CMake/FindPyUnitTest.cmake b/buildconfig/CMake/FindPyUnitTest.cmake index 07a701f6e99ab44444b5fc946e7c5eb89544f0cf..9bfc279dca311adcce015b4edb2496a8d1a7179a 100644 --- a/buildconfig/CMake/FindPyUnitTest.cmake +++ b/buildconfig/CMake/FindPyUnitTest.cmake @@ -2,7 +2,9 @@ # PYUNITTEST_ADD_TEST (public macro to add unit tests) # Adds a set of python tests based upon the unittest module # Parameters: -# _test_src_dir :: The directory where the src files reside +# _test_src_dir_base :: A base directory when added to the relative test paths gives +# an absolute path to that test. This directory is added to the +# PYTHONPATH when tests are executed # _testname_prefix :: A prefix for each test that is added to ctest, the name will be # ${_testname_prefix}_TestName # ${ARGN} :: List of test files @@ -10,7 +12,7 @@ function ( PYUNITTEST_ADD_TEST _test_src_dir _testname_prefix ) # Property for the module directory set ( _working_dir ${CMAKE_BINARY_DIR}/bin/Testing ) if ( CMAKE_GENERATOR MATCHES "Visual Studio" OR CMAKE_GENERATOR MATCHES "Xcode" ) - set ( _module_dir ${CMAKE_BINARY_DIR}/bin/$<CONFIGURATION> ) + set ( _module_dir ${CMAKE_BINARY_DIR}/bin/$<CONFIG> ) else() set ( _module_dir ${CMAKE_BINARY_DIR}/bin ) endif() @@ -18,18 +20,27 @@ function ( PYUNITTEST_ADD_TEST _test_src_dir _testname_prefix ) if ( WIN32 ) set ( _test_runner ${_test_runner}.bat ) endif () + set ( _test_runner_module ${CMAKE_SOURCE_DIR}/Framework/PythonInterface/test/testhelpers/testrunner.py ) + # Environment + if (${CMAKE_SYSTEM_NAME} STREQUAL "Windows") + set ( _python_path ${PYTHON_XMLRUNNER_DIR};${_test_src_dir};$ENV{PYTHONPATH} ) + # cmake list separator and Windows environment seprator are the same so escape the cmake one + string ( REPLACE ";" "\\;" _python_path "${_python_path}" ) + else() + set ( _python_path ${PYTHON_XMLRUNNER_DIR}:${_test_src_dir}:$ENV{PYTHONPATH} ) + endif() # Add all of the 
individual tests so that they can be run in parallel foreach ( part ${ARGN} ) - get_filename_component( _filename ${part} NAME ) + set ( _filename ${part} ) get_filename_component( _suitename ${part} NAME_WE ) # We duplicate the suitename so that it matches the junit output name set ( _pyunit_separate_name "${_testname_prefix}.${_suitename}.${_suitename}" ) add_test ( NAME ${_pyunit_separate_name} - COMMAND ${_test_runner} --classic -m testhelpers.testrunner ${_test_src_dir}/${_filename} ) + COMMAND ${_test_runner} --classic ${_test_runner_module} ${_test_src_dir}/${_filename} ) # Set the PYTHONPATH so that the built modules can be found set_tests_properties ( ${_pyunit_separate_name} PROPERTIES - ENVIRONMENT PYTHONPATH=${PYTHON_XMLRUNNER_DIR} + ENVIRONMENT "PYTHONPATH=${_python_path}" WORKING_DIRECTORY ${_working_dir} TIMEOUT ${TESTING_TIMEOUT} ) endforeach ( part ${ARGN} ) diff --git a/buildconfig/CMake/PythonTargetFunctions.cmake b/buildconfig/CMake/PythonTargetFunctions.cmake deleted file mode 100644 index f8cb5151614eaaaef5dfa11a56a47852f8284e6c..0000000000000000000000000000000000000000 --- a/buildconfig/CMake/PythonTargetFunctions.cmake +++ /dev/null @@ -1,36 +0,0 @@ -# Utility functions to add targets that "build" from pure python -# sources - -# Add rules to create a target that will copy and copy the -# given python sources to a given destination. The file names should -# be given without any directory prefix, e.g -# -# set ( SRCS -# __init__.py -# ) -# add_python_package ( TARGET_NAME mypkg -# SRCS ${SRCS} -# OUTPUT_DIR ${CMAKE_BINARY_DIR}/bin/mypkg -# ) -# -# will produce a directory in the specified location containing the listed -# files. 
-# -# Arguments: -# TARGET_NAME: The name of the target -# OUTPUT_DIR: The directory for the copied and compiled files -# SRCS: A list of python source files for this package -function (add_python_package) - set (options) - set (oneValueArgs TARGET_NAME OUTPUT_DIR) - set (multiValueArgs SRCS) - cmake_parse_arguments (PARSED "${options}" "${oneValueArgs}" - "${multiValueArgs}" ${ARGN}) - - foreach( _it ${PARSED_SRCS} ) - get_filename_component( _filename ${_it} NAME_WE ) - set ( _pyc ${_filename}.pyc ) - add_custom_command ( OUTPUT ) - endforeach() - add_custom_target ( ${PARSED_TARGET_NAME} ) -endfunction () diff --git a/buildconfig/CMake/QtTargetFunctions.cmake b/buildconfig/CMake/QtTargetFunctions.cmake index d33dde593531a9db7e5e1cd7c0a67cd9db5a99f2..f7e4ee530cc0947c8200ce1a1ada836eb44f20e7 100644 --- a/buildconfig/CMake/QtTargetFunctions.cmake +++ b/buildconfig/CMake/QtTargetFunctions.cmake @@ -188,6 +188,16 @@ function (mtd_add_qt_target) endif() install ( TARGETS ${_target} ${SYSTEM_PACKAGE_TARGET} DESTINATION ${_install_dir} ) endif() + + # Group into folder for VS + set_target_properties ( ${_target} PROPERTIES FOLDER "Qt${PARSED_QT_VERSION}" ) + # Target encompassing all Qt-based dependencies + set ( _alltarget "AllQt${PARSED_QT_VERSION}" ) + if ( TARGET ${_alltarget} ) + add_dependencies ( ${_alltarget} ${_target} ) + else () + add_custom_target ( ${_alltarget} DEPENDS ${_target} ) + endif() endfunction() function (mtd_add_qt_tests) @@ -262,6 +272,11 @@ function (mtd_add_qt_test_executable) foreach (_dep ${PARSED_PARENT_DEPENDENCIES}) add_dependencies (${_dep} ${_target_name}) endforeach() + + # set folder + if ( CMAKE_GENERATOR MATCHES "Visual Studio" ) + set_target_properties ( ${_target_name} PROPERTIES FOLDER "Qt${PARSED_QT_VERSION}Tests" ) + endif() endfunction () # Given a list of arguments decide which Qt versions diff --git a/buildconfig/CMake/SipQtTargetFunctions.cmake b/buildconfig/CMake/SipQtTargetFunctions.cmake index 
6500d5e9f640fea8d1249224d1cfa36a79c972bd..f3ac6294068d45640e6410455f6709eb929a7a51 100644 --- a/buildconfig/CMake/SipQtTargetFunctions.cmake +++ b/buildconfig/CMake/SipQtTargetFunctions.cmake @@ -52,7 +52,7 @@ function ( mtd_add_sip_module ) -I ${PARSED_SIP_SRC_DIR} -I ${_pyqt_sip_dir} ${_pyqt_sip_flags} -c ${CMAKE_CURRENT_BINARY_DIR} -j1 -w -e ${_module_spec} - DEPENDS ${_module_spec} ${SIP_SRCS} ${PARSED_HEADER_DEPS} + DEPENDS ${_module_spec} ${SIP_SRCS} ${PARSED_HEADER_DEPS} ${SIP_INCLUDE_DIR}/sip.h COMMENT "Generating ${PARSED_MODULE_NAME} python bindings with sip" ) diff --git a/buildconfig/Jenkins/buildscript b/buildconfig/Jenkins/buildscript index b6245a8a4ae8520cba71c23c111f6e2201b3c633..1293775af96f1d9466f10ff4de074dccdebf02ef 100755 --- a/buildconfig/Jenkins/buildscript +++ b/buildconfig/Jenkins/buildscript @@ -43,6 +43,9 @@ cmake --version if [[ ${JOB_NAME} == *clean* ]]; then CLEANBUILD=true fi +if [[ ${JOB_NAME} == *clang_tidy* ]]; then + CLEANBUILD=true +fi if [[ ${JOB_NAME} == *pull_requests* ]]; then PRBUILD=true diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 458264537c814d20205980504305f74652541c99..3378c933d330532d3f3b6503dd737ee06da57aa3 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -47,7 +47,7 @@ if ( SPHINX_FOUND ) add_custom_command ( OUTPUT qthelp/MantidProject.qhcp qthelp/MantidProject.qhp COMMAND ${DOCS_RUNNER_EXE} -xq runsphinx_qthelp.py - DEPENDS Framework MantidPlot MantidQt ${CMAKE_CURRENT_BINARY_DIR}/runsphinx_qthelp.py + DEPENDS Framework AllQt4 ${CMAKE_CURRENT_BINARY_DIR}/runsphinx_qthelp.py COMMENT "Building qt-assistant index files") add_custom_command ( OUTPUT qthelp/MantidProject.qhc @@ -76,7 +76,7 @@ if ( SPHINX_FOUND ) configure_file ( runsphinx.py.in runsphinx_html.py @ONLY ) add_custom_target ( ${TARGET_PREFIX}-html COMMAND ${DOCS_RUNNER_EXE} -xq runsphinx_html.py - DEPENDS Framework MantidPlot MantidQt ${CMAKE_CURRENT_BINARY_DIR}/runsphinx_html.py ${SPHINX_CONF_DIR}/conf.py conf-html.py + 
DEPENDS Framework AllQt4 ${CMAKE_CURRENT_BINARY_DIR}/runsphinx_html.py ${SPHINX_CONF_DIR}/conf.py conf-html.py COMMENT "Building html documentation" ) # Group within VS and exclude from whole build @@ -95,7 +95,7 @@ if ( SPHINX_FOUND ) configure_file ( runsphinx.py.in runsphinx_epub.py @ONLY ) add_custom_target ( ${TARGET_PREFIX}-epub COMMAND ${DOCS_RUNNER_EXE} -xq runsphinx_epub.py - DEPENDS Framework MantidPlot MantidQt ${CMAKE_CURRENT_BINARY_DIR}/runsphinx_epub.py ${SPHINX_CONF_DIR}/conf.py COMMENT "Building html documentation" + DEPENDS Framework AllQt4 ${CMAKE_CURRENT_BINARY_DIR}/runsphinx_epub.py ${SPHINX_CONF_DIR}/conf.py COMMENT "Building html documentation" ) # Group within VS and exclude from whole build set_target_properties ( ${TARGET_PREFIX}-epub PROPERTIES FOLDER "Documentation" @@ -112,7 +112,7 @@ if ( SPHINX_FOUND ) configure_file ( runsphinx.py.in runsphinx_linkcheck.py @ONLY ) add_custom_target ( ${TARGET_PREFIX}-linkcheck COMMAND ${DOCS_RUNNER_EXE} -xq runsphinx_linkcheck.py - DEPENDS Framework MantidPlot MantidQt ${CMAKE_CURRENT_BINARY_DIR}/runsphinx_linkcheck.py ${SPHINX_CONF_DIR}/conf.py + DEPENDS Framework AllQt4 ${CMAKE_CURRENT_BINARY_DIR}/runsphinx_linkcheck.py ${SPHINX_CONF_DIR}/conf.py COMMENT "Checking documentation links" ) # Group within VS and exclude from whole build @@ -135,7 +135,7 @@ if ( SPHINX_FOUND ) configure_file ( runsphinx.py.in runsphinx_doctest.py @ONLY ) add_custom_target ( ${TARGET_PREFIX}-test COMMAND ${DOCS_RUNNER_EXE} -xq runsphinx_doctest.py - DEPENDS Framework MantidPlot ${CMAKE_CURRENT_BINARY_DIR}/runsphinx_doctest.py + DEPENDS Framework AllQt4 ${CMAKE_CURRENT_BINARY_DIR}/runsphinx_doctest.py ${SPHINX_CONF_DIR}/conf.py COMMENT "Running documentation tests" ) diff --git a/docs/source/algorithms/BASISReduction-v1.rst b/docs/source/algorithms/BASISReduction-v1.rst index 8fe53f129b319b87ce7d65630b49eaeff1827296..93dc4a8b287f91f860bd693e02ee020b9ce650cb 100644 --- a/docs/source/algorithms/BASISReduction-v1.rst +++ 
b/docs/source/algorithms/BASISReduction-v1.rst @@ -27,6 +27,17 @@ Examples: If *DoIndividual* is checked, then each run number is reduced separately from the rest. The semicolon symbol is ignored. +**ExcludeTimeSegment**: +Events happening in a time segment with no proton charge are most likely +noise. Those events can be filtered out of the reduction process. + +Example: + +- "71465:0-500;71466:900-2100;71467:4000-end" will filter out events + happening between the start of the run and 500 seconds for run 71465, then + between 900 and 2100 seconds for run 71466 and between 4000 seconds and the + end of the run for 71467. Only one time segment can be excluded per run number. + **Momentum transfer binning scheme**: Three values are required, the center of the bin with the minimum momentum, the bin width, and the center of the bin with the maximum momentum. @@ -132,4 +143,3 @@ Workflow -------- .. diagram:: BASISReduction-v1_wkflw.dot - diff --git a/docs/source/algorithms/CalculatePolynomialBackground-v1.rst b/docs/source/algorithms/CalculatePolynomialBackground-v1.rst index 56d052de6697a9bb1b02acd7872218dd24ba0f13..4887185a85b59e06f4367d95b6008ed308fefab5 100644 --- a/docs/source/algorithms/CalculatePolynomialBackground-v1.rst +++ b/docs/source/algorithms/CalculatePolynomialBackground-v1.rst @@ -12,6 +12,8 @@ Description This algorithm calculates backgrounds for the histograms in a workspace by fitting a polynomial to ranges given by *XRanges*. The fitting is done using :ref:`algm-Fit`. The backgrounds are returned as the *OutputWorkspace* which can be subsequently subtracted from the original workspace. The degree of the polynomial can be given by *Degree*. A zeroth degree polynomial would correspond to fitting a constant value. The *XRanges* property is a list of range start and range end value pairs in the X axis units. Overlapping ranges are merged. If no *XRanges* are given, full histograms are used. 
+The value of *CostFunction* is passed to :ref:`algm-Fit` as-is. The default option is 'Least squares' which uses the histogram errors as weights. This might not be desirable, e.g. when there are bins with zero counts and zero errors. An 'Unweighted least squares' option is available to deal with such cases. + Usage ----- diff --git a/docs/source/algorithms/GSASIIRefineFitPeaks-v1.rst b/docs/source/algorithms/GSASIIRefineFitPeaks-v1.rst index b385330f22deb5d397cfbc0d41ecc342a55b4a77..dd34a81d2900a139094eb85764479d77b0f365d8 100644 --- a/docs/source/algorithms/GSASIIRefineFitPeaks-v1.rst +++ b/docs/source/algorithms/GSASIIRefineFitPeaks-v1.rst @@ -11,31 +11,19 @@ Description .. warning:: - This algorithm is experimental and at the moment is being developed - for a specific technique. It might be changed, renamed or even - removed without a notification, should instrument scientists decide - to do so. - -.. warning:: - - This algorithm requires GSAS-II, with custom modifications to - enable it to be used from Mantid. Please contact the Mantid - developers for details. The GSAS-II installation instructions are - available from the `GSAS-II website - <https://subversion.xray.aps.anl.gov/trac/pyGSAS>`_. + This algorithm requires GSAS-II to be installed on your computer. A + version of GSAS-II containing the module GSASIIscriptable (added in + April 2017) is required. Uses `GSAS-II <https://subversion.xray.aps.anl.gov/trac/pyGSAS>`_ [TobyVonDreele2013]_ as external software to fit peaks to a powder / engineering diffraction pattern. Here the process of peak fitting is in the context of Rietveld / Pawley / Le Bail analysis [LeBail2005]_ -The algorithm supports three refinement or fitting methods: Pawley -refinement, Rietveld refinement, and single peak fitting (or "Peaks -List" of GSAS-II). The first two methods of this algorithm implement -whole diffraction pattern fitting whereas the third method fits peaks -individually. 
The use of this algorithm is very close to the examples -described in these two GSAS-II tutorials: `Rietveld fitting / CW -Neutron Powder fit for Yttrium-Iron Garnet +The algorithm supports two refinement methods: Pawley refinement and +Rietveld refinement. The use of this algorithm is very close to the +examples described in these two GSAS-II tutorials: `Rietveld fitting / +CW Neutron Powder fit for Yttrium-Iron Garnet <https://subversion.xray.aps.anl.gov/pyGSAS/Tutorials/CWNeutron/Neutron%20CW%20Powder%20Data.htm>`_, and `Getting started / Fitting individual peaks & autoindexing <https://subversion.xray.aps.anl.gov/pyGSAS/Tutorials/FitPeaks/Fit%20Peaks.htm>`_, @@ -46,42 +34,28 @@ and the `structure routines <https://subversion.xray.aps.anl.gov/pyGSAS/sphinxdocs/build/html/GSASIIstruc.html>`_ of GSAS-II. -To run this algorithm GSAS-II must be installed and it must be -available for importing from the Mantid Python interpreter. This -algorithm requires a modified version of GSAS-II. Please contact the -developers for details. - -The methods "Pawley refinement" and "Rietveld refinement" of this -algorithm are equivalent to the function "Calculate / Refine" from the -main menu of the GSAS-II GUI. The method "Peak fitting" is equivalent -to the "Peak Fitting / Peak fit" action of the "Peaks List" window -menu of the GSAS-II GUI. +The refinement methods of this algorithm are equivalent to the +function "Calculate / Refine" from the main menu of the GSAS-II GUI. The main inputs required are histogram data, an instrument definition -parameter (in GSAS format, readable by GSAS-II), and various -parameters for the fitting/refinement process. Phase information is -also required to use the Pawley and Rietveld refinement. +parameter (in GSAS format, readable by GSAS-II), phase information and +various parameters for the fitting/refinement process. 
The phase information must be provided in `CIF format (Crystallographic Information File) <https://en.wikipedia.org/wiki/Crystallographic_Information_File>`_. -When phase information is available and the Rietveld/Pawley method is -used the algorithm will output the lattice parameters in a table -workspace. The values are given for the the full set of lattice -parameters (three lattice constants, three angles, and volume in this -sequence: a, b, c, alpha, beta, gamma, volume). The a,b, and c values -are given in Angstroms (:math:`\mathrm{\AA{}}`). The angles are given -in degrees, and the volume in :math:`\mathrm{\AA{}}^3`. +When phase information is available the algorithm will output the +lattice parameters in a table workspace. The values are given for the +the full set of lattice parameters (three lattice constants, three +angles, and volume in this sequence: a, b, c, alpha, beta, gamma, +volume). The a,b, and c values are given in Angstroms +(:math:`\mathrm{\AA{}}`). The angles are given in degrees, and the +volume in :math:`\mathrm{\AA{}}^3`. The algorithm provides goodness-of-fit estimates in the outputs *GoF* and *Rwp* or weighted profile R-factor [Toby2008]_. The *Rwp* is given as a percentage value. -Note that the option to save the GSAS-II project file -(*SaveGSASIIProjectFile*) is mandatory. This is a requirement of -GSAS-II. These project files can be opened in the GSAS-II GUI for -further processing and analysis of the data. - When Pawley refinement is selected as refinement method the flag for histogram scale factor refinement is disabled, as recommended in the `GSAS-II documentation @@ -95,23 +69,6 @@ the same name as the output GSAS-II project file but with extension ".lst". This is noted in a log message that specifies where the file has been written (next to the output project file). 
-When fitting individual peaks using the peak fitting method (not using -Rietveld/Pawley refinement), the algorithm only supports peaks with -shape of type back-to-back exponential convoluted with pseudo-voigt -(BackToBackExponentialPV). It is possible to enable the refinement of -the different function parameters via several properties (RefineAlpha, -RefineSigma, etc.). The fitted peak parameters are given in an output -table with as many rows as peaks have been found. The columns of the -table give the parameters fitted, similarly to the information found -in the "Peaks List" window of the GSAS-II GUI. These results are -printed in the log messages as well. - -.. seealso:: For fitting single peaks, one at a time, :ref:`EnggFitPeaks - <algm-EnggFitPeaks>`. For other algorithms that implement different - variants of whole diffraction pattern refinement and fitting by - :ref:`PawleyFit <algm-PawleyFit>` and :ref:`LeBailFit <algm-LeBailFit>`. - - *References*: .. [LeBail2005] Le Bail, A (2005). "Whole Powder Pattern Decomposition Methods and @@ -128,6 +85,13 @@ printed in the log messages as well. Usage ----- +.. warning:: + + Take these usage examples with a pinch of salt, as they are not + tested for correctness on our servers, due to the requirement to + have GSAS-II installed. Please contact the Mantid developers if + something is awry. + **Example - Pawley refinement of lattice parameters from a diffraction spectrum** .. 
code-block:: python @@ -145,11 +109,11 @@ Usage # wks=Load('focused_bank1_ENGINX00256663.nxs') GoF, Rwp, lattice_tbl = GSASIIRefineFitPeaks(InputWorkspace=wks, - InstrumentFile='ENGINX_255924_254854_North_bank.par', + RefinementMethods="PawleyRefinement", + InstrumentFile='template_ENGINX_241391_236516_North_bank.prm', PhaseInfoFile='FE_ALPHA.cif', PathToGSASII='/home/user/gsas', - SaveGSASIIProjectFile='example_gsas2_project', - LatticeParameters='lattice_tbl') + SaveGSASIIProjectFile='example_gsas2_project') print "Goodness of fit coefficient: {0:.5f}".format(GoF) print "Weighted profile R-factor (Rwp): {0:.5f}".format(Rwp) print ("Lattice parameters, a: {a}, b: {b}, c: {c}, alpha: {alpha}, beta: {beta}, gamma: {gamma}, " @@ -159,8 +123,8 @@ Output: .. code-block:: none - Goodness of fit coefficient: 3.63591 - Weighted profile R-factor (Rwp): 77.27831 + Goodness of fit coefficient: 3.57776 + Weighted profile R-factor (Rwp): 77.75449 Lattice parameters, a: 2.8665, b: 2.8665, c: 2.8665, alpha: 90.0, beta: 90.0, gamma: 90.0, Volume: 23.554 **Example - Rietveld refinement of lattice parameters from a diffraction spectrum** @@ -169,12 +133,11 @@ Output: wks=Load('focused_bank1_ENGINX00256663.nxs') GoF, Rwp, lattice_tbl = GSASIIRefineFitPeaks(InputWorkspace=wks, - Method='Rietveld refinement', - InstrumentFile='ENGINX_255924_254854_North_bank.par', + RefinementMethod='Rietveld refinement', + InstrumentFile='template_ENGINX_241391_236516_North_bank.prm', PhaseInfoFile='FE_ALPHA.cif', PathToGSASII='/home/user/gsas', SaveGSASIIProjectFile='example_gsas2_project', - LatticeParameters='lattice_tbl') print "Goodness of fit coefficient: {0:.5f}".format(GoF) print "Weighted profile R-factor (Rwp): {0:.5f}".format(Rwp) print ("Lattice parameters, a: {a}, b: {b}, c: {c}, alpha: {alpha}, beta: {beta}, gamma: {gamma}, " @@ -184,37 +147,10 @@ Output: .. 
code-block:: none - Goodness of fit coefficient: 3.62483 - Weighted profile R-factor (Rwp): 77.03530 + Goodness of fit coefficient: 3.57847 + Weighted profile R-factor (Rwp): 77.75515 Lattice parameters, a: 2.8665, b: 2.8665, c: 2.8665, alpha: 90.0, beta: 90.0, gamma: 90.0, Volume: 23.554 -**Example - Fit several peaks from a diffraction spectrum** - -.. code-block:: python - - wks=Load('focused_bank1_ENGINX00256663.nxs') - params_tbl_name = 'tbl_fitted_params' - GoF, Rwp, lattice_tbl = GSASIIRefineFitPeaks(InputWorkspace=wks, - Method='Peak fitting', - FittedPeakParameters=params_tbl_name, - InstrumentFile='ENGINX_255924_254854_North_bank.par', - PhaseInfoFile='FE_ALPHA.cif', - PathToGSASII='/home/user/mantid-repos/gsas', - SaveGSASIIProjectFile='test_gsas2_project', - FittedPeakParameters=params_tbl_name) - tbl_fitted_params = mtd[params_tbl_name] - print "Fitted {0} peaks.".format(tbl_fitted_params.rowCount()) - print ("Parameters of the first peak. Center: {Center:.6g}, intensity: {Intensity:.5f}, " - "alpha: {Alpha:.5f}, beta: {Beta:.5f}, sigma: {Sigma:.5f}, gamma: {Gamma:.5f}". - format(**tbl_fitted_params.row(0))) - -Output: - -.. code-block:: none - - Fitted 18 peaks. - Parameters of the first peak. Center: 38563.8, intensity: 26.22137, alpha: 0.13125, beta: 0.01990, sigma: 125475.11036, gamma: -6681.38965 - .. categories:: .. sourcelink:: diff --git a/docs/source/algorithms/HyspecScharpfCorrection-v1.rst b/docs/source/algorithms/HyspecScharpfCorrection-v1.rst new file mode 100644 index 0000000000000000000000000000000000000000..dff4da2f7ebdbc2bd781e5d2f629a935976257f1 --- /dev/null +++ b/docs/source/algorithms/HyspecScharpfCorrection-v1.rst @@ -0,0 +1,108 @@ + +.. algorithm:: + +.. summary:: + +.. alias:: + +.. properties:: + +Description +----------- + +This algorithm is applying a polarization correction for single crystal +inelastic experiments. 
If one measures scattering intensity with polarization +along the momentum transfer :math:`Q`, perpendicular to it in the horizontal +plane, and in the vertical direction, one can write the spin incoherent +contribution as: + +.. math:: + + I_{SI}=\frac{3}{2}\left(\Sigma_x^{nsf}-\Sigma_y^{nsf}+\Sigma_z^{sf}\right) + +where the *sf* and *nsf* subscripts stand for spin flip and non-spin flip. +The *x* direction is parallel to :math:`Q`, *y* is perpendicular in the horizontal +plane, while *z* is vertically up. **NOTE**: this is not the Mantid convention. + +However, for instruments with multiple detectors and using time of flight +neutrons, one has a constantly varying angle between :math:`Q` and polarization. +If we call this angle :math:`\alpha` (Scharpf angle), the above equation can +be written as: + +.. math:: + + I_{SI}=\frac{3}{2}\left(\frac{\Sigma_{x'}^{nsf}-\Sigma_{y'}^{nsf}}{\cos^2\alpha-\sin^2\alpha}\right)+\frac{3}{2}\Sigma_z^{sf} + +This algorithm calculates the Scharpf angle for every event or energy transfer bin, +then divides the intensity by :math:`F=\cos^2\alpha-\sin^2\alpha=\cos(2\alpha)`. +In places where *F* is less than the `Precision`, the intensity of the output workspace is set to 0. + +For a detector at angle :math:`\theta` in the horizontal plane, the angle +between :math:`Q` and :math:`k_i` is given by + +.. math:: + + \gamma=\arctan2\left(-\frac{k_f}{k_i}\sin\theta, 1-\frac{k_f}{k_i}\cos\theta\right) + +The Scharpf angle is then: + +.. math:: + \alpha = \gamma- PolarizationAngle + +.. Note:: + + This algorithm assumes that all scattering is in the horizontal plane (good enough approximation + for Hyspec instrument, in polarized mode). + +For more information, see + +#. 
Werner Schweika - *XYZ-polarisation analysis of diffuse magnetic neutron scattering from single crystals*, Journal of Physics: Conference Series, **211**,012026, (2010) doi: `10.1088/1742-6596/211/1/012026 <http://dx.doi.org/10.1088/1742-6596/211/1/012026>`_ + + + +Usage +----- + +**Example - HyspecScharpfCorrection** + +.. testcode:: HyspecScharpfCorrectionExample + + # Create a workspace (NXSPE equivalent) + w = CreateSampleWorkspace(Function='Flat background', NumBanks=1, + BankPixelWidth=1, XUnit='DeltaE', + XMin=-10.25, XMax=20, BinWidth=0.5) + MoveInstrumentComponent(Workspace=w, ComponentName='bank1', X=3, Z=3, RelativePosition=False) + AddSampleLog(Workspace=w,LogName='Ei', LogText='17.1', LogType='Number') + + wsOut = HyspecScharpfCorrection(InputWorkspace=w, + PolarizationAngle=-10, + Precision=0.2) + + # Get the data + intensity = wsOut.readY(0) + bin_boundaries = wsOut.readX(0) + energy_transfer = 0.5*(bin_boundaries[1:]+bin_boundaries[:-1]) + # at DeltaE=5meV, Q makes an angle of 55.7 degrees with incident beam + # If polarization angle is -10 degrees, the intensity should be 0 + # Below this energy, the Scharpf angle correction is negative, above + # is positive. If energy transfer is greater than Ei, intensity is + # set to 0 + print('Intensity at DeltaE= 0meV: {0:.2f}'.format((intensity[energy_transfer==0])[0])) + print('Intensity at DeltaE= 5meV: {0:.2f}'.format((intensity[energy_transfer==5])[0])) + print('Intensity at DeltaE=10meV: {0:.2f}'.format((intensity[energy_transfer==10])[0])) + print('Intensity at DeltaE=19meV: {0:.2f}'.format((intensity[energy_transfer==19])[0])) + + +Output: + +.. testoutput:: HyspecScharpfCorrectionExample + + Intensity at DeltaE= 0meV: -2.37 + Intensity at DeltaE= 5meV: 0.00 + Intensity at DeltaE=10meV: 1.99 + Intensity at DeltaE=19meV: 0.00 + +.. categories:: + +.. 
sourcelink:: + diff --git a/docs/source/algorithms/MonitorEfficiencyCorUser-v1.rst b/docs/source/algorithms/MonitorEfficiencyCorUser-v1.rst index abb4a94a46c45fc407e621339586530975ded2f6..0c96ec724c1d1acfa91bab72f0e558a816e5edcc 100644 --- a/docs/source/algorithms/MonitorEfficiencyCorUser-v1.rst +++ b/docs/source/algorithms/MonitorEfficiencyCorUser-v1.rst @@ -11,11 +11,9 @@ Description This algorithm normalises the neutron counts by monitor counts with an additional efficiency correction. -To date this algorithm only supports the TOFTOF instrument. - The monitor counts is the total count and it is stored in the SampleLogs of the input workspace. -This count is corrected taking into account the monitor efficiency. The formula used for the correction is stored in the Parameters file and requires the incident energy (Ei), which is stored in the SampleLogs of the input workspace. +This count is corrected taking into account the monitor efficiency. The formula used for the correction is stored in the Parameters file and requires the incident energy (:math:`E_i`), which is stored in the SampleLogs of the input workspace. The corrected value of the monitor counts is used to normalise the input workspace. @@ -23,10 +21,10 @@ The corrected value of the monitor counts is used to normalise the input workspa Restrictions ################################### -A formula named "formula\_eff" must be defined in the instrument -parameters file. It is defined as "monitor counts * sqrt(Ei/25.3)" +- A formula named "formula\_eff" must be defined in the instrument parameters file. For TOFTOF and DNS instruments it is defined as :math:`M\cdot\sqrt{\frac{E_i}{25.3}}`. The incident energy :math:`E_i` and the monitor counts :math:`M` are read in the sample logs of the input workspace. 
+- Either sample log "monitor\_counts" must be present in the InputWorkspace or the name of the sample log containing monitor counts must be defined under "monitor\_counts\_log" parameter in the instrument parameters file. +- Input workspace must have "Ei" sample log. -The incident energy Ei and the monitor counts are read in the SampleLogs of the input workspace. Usage ----- diff --git a/docs/source/algorithms/TOFTOFMergeRuns-v1.rst b/docs/source/algorithms/TOFTOFMergeRuns-v1.rst index 9ad7573cd4d6ece7d2ca9cb1d692acef23ecbb76..9ebcd6ad5f0d8ad71a1ad6f750068830813bf04d 100644 --- a/docs/source/algorithms/TOFTOFMergeRuns-v1.rst +++ b/docs/source/algorithms/TOFTOFMergeRuns-v1.rst @@ -89,9 +89,9 @@ Usage print("Run number for merged workspaces = list of all workspaces: {}".format(ws3.getRun().getLogData('run_number').value)) # Monitor counts - print("Monitor counts for 1st workspace: {}".format(ws1.getRun().getLogData('monitor_counts').value)) - print("Monitor counts for 2nd workspace: {}".format(ws2.getRun().getLogData('monitor_counts').value)) - print("Monitor counts for merged workspaces = sum over all workspaces: {}".format(ws3.getRun().getLogData('monitor_counts').value)) + print("Monitor counts for 1st workspace: {:.0f}".format(ws1.getRun().getLogData('monitor_counts').value)) + print("Monitor counts for 2nd workspace: {:.0f}".format(ws2.getRun().getLogData('monitor_counts').value)) + print("Monitor counts for merged workspaces = sum over all workspaces: {:.0f}".format(ws3.getRun().getLogData('monitor_counts').value)) Output: @@ -133,9 +133,9 @@ Output: group=GroupWorkspaces('ws1,ws2') groupmerged=TOFTOFMergeRuns(group) - print("Monitor counts for 1st workspace: {}".format(ws1.getRun().getLogData('monitor_counts').value)) - print("Monitor counts for 2nd workspace: {}".format(ws2.getRun().getLogData('monitor_counts').value)) - print("Monitor counts for merged workspaces = sum over all workspaces: {}".format(groupmerged.getRun().getLogData('monitor_counts').value)) 
+ print("Monitor counts for 1st workspace: {:.0f}".format(ws1.getRun().getLogData('monitor_counts').value)) + print("Monitor counts for 2nd workspace: {:.0f}".format(ws2.getRun().getLogData('monitor_counts').value)) + print("Monitor counts for merged workspaces = sum over all workspaces: {:.0f}".format(groupmerged.getRun().getLogData('monitor_counts').value)) Output: diff --git a/docs/source/concepts/IndexProperty.rst b/docs/source/concepts/IndexProperty.rst index d06729582d201f97cb12186a3e436e1a2032e176..966cf8ebe3ba981133954926d01abc761220fec7 100644 --- a/docs/source/concepts/IndexProperty.rst +++ b/docs/source/concepts/IndexProperty.rst @@ -44,14 +44,21 @@ Property declaration is as shown below: .. code-block:: cpp - //Declare property with default settings + #include "MantidAPI/Algorithm.tcc" + + // Declare property with default settings // IndexType::WorkspaceIndex is default - declareWorkspaceInputProperties<MatrixWorkspace>("InputWorkspace"); - - //Declare all arguments - declareWorkspaceInputProperties<MatrixWorkspace>("InputWorkspace", - IndexType::SpectrumNum|IndexType::WorkspaceIndex, PropertyMode::Type::Mandatory, - LockMode::Type::Lock, "This is an input workspace with associated index handling") + declareWorkspaceInputProperties<MatrixWorkspace>( + "InputWorkspace", + "This is an input workspace with associated index handling"); + + // Declare all arguments + declareWorkspaceInputProperties<MatrixWorkspace, + IndexType::SpectrumNum | IndexType::WorkspaceIndex>( + "InputWorkspace", + "This is an input workspace with associated index handling" + /* optional PropertyMode, LockMode, and validator forwarded to WorkspaceProperty */); + Internally, a ``WorkspaceProperty`` is created along with an ``IndexTypeProperty`` for managing the workspace and the type of user-defined input index list respectively. 
Their names are diff --git a/docs/source/development/AlgorithmMPISupport.rst b/docs/source/development/AlgorithmMPISupport.rst index 29c8ec856811a2c57304cf8f67154b1ec69d85de..13cc17ab6d1e3d607eab08c4a4833d009a9958ea 100644 --- a/docs/source/development/AlgorithmMPISupport.rst +++ b/docs/source/development/AlgorithmMPISupport.rst @@ -266,6 +266,10 @@ In that case the execution mode can simply be determined from the input workspac Here the helper ``Parallel::getCorrespondingExecutionMode`` is used to obtain the 'natural' execution mode from a storage mode, i.e., ``ExecutionMode::Identical`` for ``StorageMode::Cloned``, ``ExecutionMode::Distributed`` for ``StorageMode::Distributed``, and ``ExecutionMode::MasterOnly`` for ``StorageMode::MasterOnly``. More complex algorithms may require more complex decision mechanism, e.g., when there is more than one input workspace. +For many algorithms the base class ``API::ParallelAlgorithm`` provides a sufficient default implementation of ``Algorithm::getParallelExecutionMode()``. +MPI support can simply be enabled by inheriting from ``ParallelAlgorithm`` instead of from ``Algorithm``. +Generally this works only for algorithms with a single input and a single output that either process only non-spectrum data or process all spectra independently. + If none of the other virtual methods listed above is implemented, ``Algorithm`` will run the normal ``exec()`` method on all MPI ranks. The exception are non-master ranks if the execution mode is ``ExecutionMode::MasterOnly`` -- in that case creating a dummy workspace is attempted. This is discussed in more detail in the subsections below. @@ -447,12 +451,27 @@ Potential limitations must be described in the comments. 
Supported Algorithms #################### -=============== =============== ======== -Algorithm Supported modes Comments -=============== =============== ======== -CreateWorkspace all -Rebin all min and max bin boundaries must be given explicitly -=============== =============== ======== +================= =============== ======== +Algorithm Supported modes Comments +================= =============== ======== +CompressEvents all +CreateWorkspace all +CropWorkspace all see ExtractSpectra regarding X cropping +ExtractSpectra2 all currently not available via algorithm factory or Python +ExtractSpectra all not supported with ``DetectorList``, cropping in X may exhibit inconsistent behavior in case spectra have common boundaries within some ranks but not within all ranks or across ranks +FilterBadPulses all +FilterByLogValue all +LoadEventNexus Distributed storage mode of output cannot be changed via a parameter currently, min and max bin boundary are not globally the same +LoadInstrument all +LoadNexusLogs all +LoadParameterFile all segfaults when used in unit tests with MPI threading backend due to `#9365 <https://github.com/mantidproject/mantid/issues/9365>`_, normal use should be ok +MaskBins all +Rebin all min and max bin boundaries must be given explicitly +RemovePromptPulse all +SortEvents all +================= =============== ======== + +Currently none of the above algorithms works with ``StorageMode::Distributed`` in case there are zero spectra on any rank. .. 
rubric:: Footnotes diff --git a/docs/source/images/sans_isis_v2_run_tab_data_table.png b/docs/source/images/sans_isis_v2_run_tab_data_table.png index 4f14f968bb32004771b1321fbe4e0854cceb6167..7ca3d833eab727b1562e18627318cc38143bee34 100644 Binary files a/docs/source/images/sans_isis_v2_run_tab_data_table.png and b/docs/source/images/sans_isis_v2_run_tab_data_table.png differ diff --git a/docs/source/images/sans_isis_v2_whole_gui.png b/docs/source/images/sans_isis_v2_whole_gui.png index 8af01b20c600eb7f8cfb71d43fe73242d9c909a9..0809703e4c6698a0411f6116383f0ae391f35e0d 100644 Binary files a/docs/source/images/sans_isis_v2_whole_gui.png and b/docs/source/images/sans_isis_v2_whole_gui.png differ diff --git a/docs/source/interfaces/ISIS SANS v2.rst b/docs/source/interfaces/ISIS SANS v2.rst index 37d56c76d1f9c89c84542d937533f5f89c6fa713..d4ca27bc73b1cdf7f9fc7f9b583b11f09394cf4c 100644 --- a/docs/source/interfaces/ISIS SANS v2.rst +++ b/docs/source/interfaces/ISIS SANS v2.rst @@ -60,10 +60,43 @@ Data Table +-------+--------------------------+-----------------------------------------------------------------------------------------+ | **9** | **Select instrument** | Selects the instrument to use. Note that this setting is used to resolve run numbers. | +-------+--------------------------+-----------------------------------------------------------------------------------------+ -| **10**| **Options** | This column allows the user to provide row specific settings. Currently only | -| | | **WavelengthMin** and **WavelengthMax** can be set here. | +| **10**| **Unused Functionality** | These icons are not used | +-------+--------------------------+-----------------------------------------------------------------------------------------+ +Columns +^^^^^^^ + ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **SampleScatter** | Scatter data file to use. 
This is the only mandatory field | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **ssp** | Sample scatter period, if not specified all periods will be used (where applicable) | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **SampleTrans** | Transmission data file to use. | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **stp** | Sample scatter period, if not specified all periods will be used (where applicable) | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **SampleDirect** | Direct data file to use | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **sdp** | Sample direct period, if not specified all periods will be used (where applicable) | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **CanScatter** | Scatter datafile for can run | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **csp** | Can scatter period, if not specified all periods will be used (where applicable) | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **CanTrans** | Transmission datafile for can run | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **ctp** | Can transmission period, if not specified all periods will be used (where applicable) | 
++--------------------------+-------------------------------------------------------------------------------------------------+ +| **CanDirect** | Direct datafile for can run | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **OutputName** | Name of output workspace | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **User File** | User file to use for this row. If specified it will override any options set in the GUI, | +| | otherwise the default file will be used. | ++--------------------------+-------------------------------------------------------------------------------------------------+ +| **Options** | This column allows the user to provide row specific settings. Currently only **WavelengthMin**| +| | and WavelengthMax can be set here. | ++--------------------------+-------------------------------------------------------------------------------------------------+ Save Options ^^^^^^^^^^^^ diff --git a/docs/source/release/v3.12.0/diffraction.rst b/docs/source/release/v3.12.0/diffraction.rst index 79174ecb3939e6ada94c7ff15821a054b6fecc3b..6d4036d18fb5a63d69cd8159a833a485b19364a4 100644 --- a/docs/source/release/v3.12.0/diffraction.rst +++ b/docs/source/release/v3.12.0/diffraction.rst @@ -22,6 +22,9 @@ Powder Diffraction Engineering Diffraction ----------------------- +- :ref:`algm-GSASIIRefineFitPeaks>` has been re-integrated with the + latest version of GSAS-II, allowing Rietveld and Pawley refinement + within Mantid. - Usability improvements in the GUI: - The "Invalid RB number" popup window in the GUI has been replaced with a more user-friendly message - Improved progress reporting for Calibration and Focus @@ -30,11 +33,20 @@ Single Crystal Diffraction -------------------------- - :ref:`FilterPeaks <algm-FilterPeaks>` now supports filtering peaks by TOF, d-spacing, and wavelength. 
-- HB3A reduction interface has been enhanced. A child window is added to it for users to pre-process scans and save the processed and merged data to NeXus files in order to save time when they start to reduce and visualize the data. +- HB3A reduction interface has been enhanced. A child window is added to it for users to pre-process scans and save the processed and merged data to NeXus files in order to save time when they start to reduce and visualize the data. A record file is generated along with processed scans to record the calibration information. During data reduction, scans that have been processed in pre-processing will be loaded automatically from corresponding MD files. + +- In the HB3A reduction interface, the section for downloading experimental data via http server has been removed from the main UI. + - :ref:`IntegratePeaksMDHKL <algm-IntegratePeaksMDHKL>` now has option to specify background shell instead of using default background determination. Imaging ------- +Features Removed +---------------- + +* The "Test the Curve Fit widget" graphical interface has been removed; it was a test harness for developers and was not intended to be exposed during earlier releases. + + :ref:`Release 3.12.0 <v3.12.0>` diff --git a/docs/source/release/v3.12.0/framework.rst b/docs/source/release/v3.12.0/framework.rst index 6b6eed76cd9adb64b0492a33f27d810a460cab79..6453e06fc3f81b8fc841b1fb3bc5e10e075ee190 100644 --- a/docs/source/release/v3.12.0/framework.rst +++ b/docs/source/release/v3.12.0/framework.rst @@ -23,10 +23,12 @@ Algorithms ---------- :ref:`NormaliseToMonitor <algm-NormaliseToMonitor>` now supports workspaces with detector scans and workspaces with single-count point data. +- It is now possible to choose between weighted and unweighted fitting in :ref:`CalculatePolynomialBackground <algm-CalculatePolynomialBackground>`.
- :ref:`CreateWorkspace <algm-CreateWorkspace>` will no longer create a default (and potentially wrong) mapping from spectra to detectors, unless a parent workspace is given. This change ensures that accidental bad mappings that could lead to corrupted data are not created silently anymore. This change does *not* affect the use of this algorithm if: (1) a parent workspace is given, or (2) no instrument is loaded into to workspace at a later point, or (3) an instrument is loaded at a later point but ``LoadInstrument`` is used with ``RewriteSpectraMapping=True``. See also the algorithm documentation for details. - :ref:`ConjoinWorkspaces <algm-ConjoinWorkspaces>` now supports non-constant bins. - :ref:`Fit <algm-Fit>` will now respect excluded ranges when ``CostFunction = 'Unweighted least squares'``. - :ref:`NormaliseToMonitor <algm-NormaliseToMonitor>` now supports non-constant number of bins. +- :ref:`MaskBins <algm-MaskBins>` now uses a modernized and standardized way for providing a list of workspace indices. For compatibility reasons the previous ``SpectraList`` property is still supported. 
Core Functionality ------------------ diff --git a/docs/source/release/v3.12.0/indirect_inelastic.rst b/docs/source/release/v3.12.0/indirect_inelastic.rst index 6759792bee11b44c4a2c2cfa8e89ca33c6649c2c..140f023bac9f6d69e129dee8d130d2097f31283d 100644 --- a/docs/source/release/v3.12.0/indirect_inelastic.rst +++ b/docs/source/release/v3.12.0/indirect_inelastic.rst @@ -14,6 +14,8 @@ New Improved ######## +- BASISReduction now permits the user to exclude a contiguous time segment from the reduction process + Vesuvio ------- diff --git a/docs/source/release/v3.12.0/spectroscopy.rst b/docs/source/release/v3.12.0/spectroscopy.rst index 7ec4f715360f51b3de20aa1590ec1ee55aef1760..7839e668f7836233ec949792c25f4b5cdbbb69b9 100644 --- a/docs/source/release/v3.12.0/spectroscopy.rst +++ b/docs/source/release/v3.12.0/spectroscopy.rst @@ -12,11 +12,13 @@ Spectroscopy Changes Direct Geometry --------------- +- New algorithm :ref:`HyspecScharpfCorrection <algm-HyspecScharpfCorrection-v1>` that can be used to calculate spin incoherent scattering from polarized neutron data. - TOFTOF data reduction GUI has been improved. In the new version it has options to delete intermediate workspaces, to replace NaNs in S(Q,W), to create diffractograms and to save the reduced data in NXSPE and NeXus format. +- :ref:`algm-MonitorEfficiencyCorUser` is no longer restricted to the TOFTOF instrument. Indirect Geometry ----------------- - New algorithm :ref:`BASISDiffraction <algm-BASISDiffraction-v1>` to determine the orientation of crystal samples for the BASIS beamline.
-:ref:`Release 3.12.0 <v3.12.0>` \ No newline at end of file +:ref:`Release 3.12.0 <v3.12.0>` diff --git a/instrument/DNS_Parameters.xml b/instrument/DNS_Parameters.xml index ef02233234304ae22506d545d7c9f3ca95551611..d15fdcb8a02f3e1a5f2b08c59461b19624e2eb98 100644 --- a/instrument/DNS_Parameters.xml +++ b/instrument/DNS_Parameters.xml @@ -52,6 +52,11 @@ <value val="sqrt(e/25.3)" /> </parameter> + <!-- name of the sample log to store monitor counts. Algorithm: MonitorEfficiencyCorUser --> + <parameter name="monitor_counts_log" type="string"> + <value val="mon_sum" /> + </parameter> + <!-- Distance [m] between sample and equatorial line of the detector. Mandatory if you want to correct the flight paths. Also used by the loader to estimate EPP. --> <parameter name="l2" type="string"> diff --git a/instrument/Facilities.xml b/instrument/Facilities.xml index b218298efb24d2994ad442ec7f41235c8731dfab..061fd364bd169b7259d6a12d23059867425af0ab 100644 --- a/instrument/Facilities.xml +++ b/instrument/Facilities.xml @@ -799,14 +799,14 @@ <instrument name="ISIS_Histogram"> <technique>Test Listener</technique> <livedata> - <connection name="histo" address="localhost:56789" listener="ISISHistoDataListener" /> + <connection name="histo" address="127.0.0.1:56789" listener="ISISHistoDataListener" /> </livedata> </instrument> <instrument name="ISIS_Event"> <technique>Test Listener</technique> <livedata> - <connection name="event" address="localhost:59876" listener="ISISLiveEventDataListener" /> + <connection name="event" address="127.0.0.1:59876" listener="ISISLiveEventDataListener" /> </livedata> </instrument> diff --git a/instrument/TOFTOF_Parameters.xml b/instrument/TOFTOF_Parameters.xml index db9f457c7985f85c8604375785957d6f54a32e88..fcc356df299708094bba9380ea4d56732d1e1098 100644 --- a/instrument/TOFTOF_Parameters.xml +++ b/instrument/TOFTOF_Parameters.xml @@ -25,6 +25,11 @@ <value val="sqrt(e/25.3)" /> </parameter> + <!-- name of the sample log to store monitor counts. 
Algorithm: MonitorEfficiencyCorUser --> + <parameter name="monitor_counts_log" type="string"> + <value val="monitor_counts" /> + </parameter> + <!-- parameters to merge sample logs by MergeRuns algorithm --> <parameter name="sample_logs_sum" type="string"> <value val="monitor_counts, duration" /> diff --git a/qt/CMakeLists.txt b/qt/CMakeLists.txt index 3189ce2ebb43188657aebbfbc3c78f6d51c03503..f32ea2d43b601759de7fee94d1e408599e404bc2 100644 --- a/qt/CMakeLists.txt +++ b/qt/CMakeLists.txt @@ -4,7 +4,7 @@ find_package ( QScintillaQt4 REQUIRED ) # Utilities for defining targets -include (QtTargetFunctions) +include ( QtTargetFunctions ) add_subdirectory ( widgets ) add_subdirectory ( python ) @@ -12,32 +12,3 @@ add_subdirectory ( scientific_interfaces ) if ( MAKE_VATES ) add_subdirectory ( paraview_ext ) endif ( MAKE_VATES ) - -########################################################################### -# Add a custom target to build all of the MantidQt packages -########################################################################### - -if ( MSVC_IDE ) -# # Add to the 'MantidQt' group in VS - set_property ( TARGET MantidQtWidgetsCommonQt4 PROPERTY FOLDER "MantidQt" ) - set_property ( TARGET MantidQtWidgetsLegacyQwtQt4 PROPERTY FOLDER "MantidQt" ) - set_property ( TARGET MantidQtWidgetsFactoryQt4 PROPERTY FOLDER "MantidQt" ) - set_property ( TARGET MantidQtWidgetsInstrumentViewQt4 PROPERTY FOLDER "MantidQt" ) - set_property ( TARGET MantidQtWidgetsRefDetectorViewQt4 PROPERTY FOLDER "MantidQt" ) - set_property ( TARGET MantidQtWidgetsSliceViewerQt4 PROPERTY FOLDER "MantidQt" ) - set_property ( TARGET MantidQtWidgetsSpectrumViewerQt4 PROPERTY FOLDER "MantidQt" ) - set_property ( TARGET MantidQtWidgetsPluginsDesignerQt4 PROPERTY FOLDER "MantidQt" ) - set_property ( TARGET MantidQtWidgetsPluginsAlgorithmDialogsQt4 PROPERTY FOLDER "MantidQt" ) - set_property ( TARGET mantidqtpython PROPERTY FOLDER "MantidQt" ) -else () - add_custom_target ( MantidQt DEPENDS 
MantidQtWidgetsCommonQt4 - MantidQtWidgetsLegacyQwtQt4 - MantidQtWidgetsFactoryQt4 - MantidQtWidgetsInstrumentViewQt4 - MantidQtWidgetsSliceViewerQt4 - MantidQtWidgetsSpectrumViewerQt4 - MantidQtWidgetsRefDetectorViewQt4 - MantidQtWidgetsPluginsAlgorithmDialogsQt4 - mantidqtpython - ) -endif () diff --git a/qt/python/CMakeLists.txt b/qt/python/CMakeLists.txt index 7e9a393f5e028b6bc87aac2ee9a55e94ea939c9a..d8ae39214d00fa852f081fc9e32e00a3e8bd99cf 100644 --- a/qt/python/CMakeLists.txt +++ b/qt/python/CMakeLists.txt @@ -1,2 +1,18 @@ +# This file manages building/installation of the mantidqt and mantidqtpython +# Python wrappers. +# + # Legacy wrappers for MantidPlot add_subdirectory ( mantidqtpython ) + +# mantidqt is run from the source directory so just add tests +set ( PYTHON_TEST_FILES + mantidqt/test/import_test.py +) + +# Tests +pyunittest_add_test ( ${CMAKE_CURRENT_SOURCE_DIR} + mantidqt ${PYTHON_TEST_FILES} +) + +# No package installation yet... diff --git a/qt/python/mantidqt/__init__.py b/qt/python/mantidqt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa2759b174eda6295744b9b70988c80551ca6df6 --- /dev/null +++ b/qt/python/mantidqt/__init__.py @@ -0,0 +1,22 @@ +# This file is part of the mantidqt package +# +# Copyright (C) 2017 mantidproject +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+""" A collection of Qt widgets and functionality common to many +Mantid-based applications/interfaces. +""" + +# This file should be left free of PyQt imports to allow quick importing +# of the main package. diff --git a/qt/python/mantidqt/test/import_test.py b/qt/python/mantidqt/test/import_test.py new file mode 100644 index 0000000000000000000000000000000000000000..672afdaaf1ff83ca644c112f0badebeb1557854e --- /dev/null +++ b/qt/python/mantidqt/test/import_test.py @@ -0,0 +1,12 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import unittest + +class ImportTest(unittest.TestCase): + + def test_import(self): + import mantidqt # noqa + +if __name__ == "__main__": + unittest.main() diff --git a/qt/python/mantidqtpython/CMakeLists.txt b/qt/python/mantidqtpython/CMakeLists.txt index 96a1ae717769629f01673d87b3e15f4465ae5129..9b75792fa65d1b9814d250a88ac30c92a5e4b08e 100644 --- a/qt/python/mantidqtpython/CMakeLists.txt +++ b/qt/python/mantidqtpython/CMakeLists.txt @@ -95,7 +95,7 @@ mtd_add_sip_module ( Qt4::QtOpenGL Qwt5 ${PYTHON_LIBRARIES} - FOLDER MantidQt4 + FOLDER Qt4 ) if ( MSVC ) diff --git a/qt/scientific_interfaces/CMakeLists.txt b/qt/scientific_interfaces/CMakeLists.txt index e979e681458c44050ef2a469c5abd13d92cd78ac..25a9fa612bf0b3eab71e068b2ce4bf437bbb6835 100644 --- a/qt/scientific_interfaces/CMakeLists.txt +++ b/qt/scientific_interfaces/CMakeLists.txt @@ -30,6 +30,7 @@ set ( TEST_FILES test/ALCLatestFileFinderTest.h test/ALCPeakFittingModelTest.h test/ALCPeakFittingPresenterTest.h + test/EnggDiffFittingModelTest.h test/EnggDiffFittingPresenterTest.h test/EnggDiffractionPresenterTest.h test/IO_MuonGroupingTest.h diff --git a/qt/scientific_interfaces/DynamicPDF/CMakeLists.txt b/qt/scientific_interfaces/DynamicPDF/CMakeLists.txt index 91366ed47ed97205bb7a955bc509cbb3e6a60cac..44b0c90ddc870dbf622dedd3d738145d57a67375 100644 --- a/qt/scientific_interfaces/DynamicPDF/CMakeLists.txt +++ 
b/qt/scientific_interfaces/DynamicPDF/CMakeLists.txt @@ -5,7 +5,6 @@ set ( SRC_FILES DPDFFitOptionsBrowser.cpp DPDFFourierTransform.cpp DPDFInputDataControl.cpp - DisplayCurveFitTest.cpp SliceSelector.cpp ) @@ -19,12 +18,10 @@ set ( INC_FILES DPDFFitOptionsBrowser.h DPDFFourierTransform.h DPDFInputDataControl.h - DisplayCurveFitTest.h SliceSelector.h ) -set ( MOC_FILES - DisplayCurveFitTest.h +set ( MOC_FILES DPDFBackgroundRemover.h DPDFDisplayControl.h DPDFFitControl.h @@ -34,8 +31,8 @@ set ( MOC_FILES SliceSelector.h ) -set ( UI_FILES - DisplayCurveFitTest.ui + +set ( UI_FILES DPDFBackgroundRemover.ui DPDFFitControl.ui DPDFFourierTransform.ui @@ -68,8 +65,3 @@ mtd_add_qt_library (TARGET_NAME MantidScientificInterfacesDynamicPDF OSX_INSTALL_RPATH @loader_path/../../Contents/MacOS ) - -if ( MSVC_IDE ) - # Add to the 'ScientificInterfaces' group in VS - set_property ( TARGET MantidScientificInterfacesDynamicPDFQt4 PROPERTY FOLDER "ScientificInterfaces" ) -endif() diff --git a/qt/scientific_interfaces/DynamicPDF/DisplayCurveFitTest.cpp b/qt/scientific_interfaces/DynamicPDF/DisplayCurveFitTest.cpp deleted file mode 100644 index 542dab3e167a3f8fab629c0850e4be8ed9d8709e..0000000000000000000000000000000000000000 --- a/qt/scientific_interfaces/DynamicPDF/DisplayCurveFitTest.cpp +++ /dev/null @@ -1,106 +0,0 @@ -#include "DisplayCurveFitTest.h" -// includes for workspace handling -#include "MantidAPI/AnalysisDataService.h" -#include "MantidAPI/MatrixWorkspace.h" -// includes for interface functionality -#include "MantidQtWidgets/LegacyQwt/DisplayCurveFit.h" -#include "MantidQtWidgets/LegacyQwt/RangeSelector.h" - -namespace { -Mantid::Kernel::Logger g_log("DynamicPDF"); -} - -namespace MantidQt { -namespace CustomInterfaces { -namespace DynamicPDF { - -// Add this class to the list of specialised dialogs in this namespace only if -// compiling in Debug mode -//#ifndef NDEBUG -DECLARE_SUBWINDOW(DisplayCurveFitTest) -//#endif - -using curveType = 
MantidQt::MantidWidgets::DisplayCurveFit::curveType; -using dcRange = MantidQt::MantidWidgets::DisplayCurveFit::dcRange; - -// ++++++++++++++++++++++++++++ -// +++++ Public Members +++++ -// ++++++++++++++++++++++++++++ - -/// Constructor -DisplayCurveFitTest::DisplayCurveFitTest(QWidget *parent) - : UserSubWindow{parent} {} - -DisplayCurveFitTest::~DisplayCurveFitTest() {} - -/** - * @brief Initialize the widgets defined within the form generated in - * Qt-Designer. Also, defined the SIGNALS to SLOTS connections. - */ -void DisplayCurveFitTest::initLayout() { - m_uiForm.setupUi(this); - connect(m_uiForm.dataSelector, SIGNAL(dataReady(const QString &)), this, - SLOT(loadSpectra(const QString &))); -} - -// +++++++++++++++++++++++++++ -// +++++ Private Slots +++++ -// +++++++++++++++++++++++++++ - -/** - * @brief The test proper that loads the fit curves to be - * displayed and the two ranges. - * @param workspaceName the name of the workspace containing - * the data of the curves to be displayed. 
- */ -void DisplayCurveFitTest::loadSpectra(const QString &workspaceName) { - auto workspace = Mantid::API::AnalysisDataService::Instance() - .retrieveWS<Mantid::API::MatrixWorkspace>( - workspaceName.toStdString()); - if (!workspace) { - auto title = QString::fromStdString(this->name()); - auto error = - QString::fromStdString("Workspace must be of type MatrixWorkspace"); - QMessageBox::warning(this, title, error); - return; - } - if (workspace->getNumberHistograms() < 4) { - auto title = QString::fromStdString(this->name()); - auto error = QString("Not enough number of histograms in the workspace"); - QMessageBox::warning(this, title, error); - return; - } - m_uiForm.displayFit->addSpectrum(curveType::data, workspace, 0); - auto curveRange = m_uiForm.displayFit->getCurveRange(curveType::data); - static bool firstPass{TRUE}; - - // Set up the range selector for the fit - m_uiForm.displayFit->addRangeSelector(dcRange::fit); - auto rangeSelectorFit = m_uiForm.displayFit->m_rangeSelector.at(dcRange::fit); - if (firstPass || m_uiForm.updateRangeSelectors->isChecked()) { - rangeSelectorFit->setRange(curveRange.first, curveRange.second); - rangeSelectorFit->setMinimum(1.05 * curveRange.first); - rangeSelectorFit->setMaximum(0.95 * curveRange.second); - } - - // Set up the range evaluate range selector - m_uiForm.displayFit->addRangeSelector(dcRange::evaluate); - auto rangeSelectorEvaluate = - m_uiForm.displayFit->m_rangeSelector.at(dcRange::evaluate); - if (firstPass || m_uiForm.updateRangeSelectors->isChecked()) { - rangeSelectorEvaluate->setRange(curveRange.first, curveRange.second); - rangeSelectorEvaluate->setMinimum(curveRange.first); - rangeSelectorEvaluate->setMaximum(curveRange.second); - } - - m_uiForm.displayFit->addSpectrum(curveType::fit, workspace, 1); - m_uiForm.displayFit->addSpectrum(curveType::residuals, workspace, 2); - m_uiForm.displayFit->addSpectrum(curveType::guess, workspace, 3); - - m_uiForm.displayFit->addResidualsZeroline(); - firstPass = 
FALSE; -} - -} // namespace MantidQt -} // namespace CustomInterfaces -} // namespace DynamicPDF diff --git a/qt/scientific_interfaces/DynamicPDF/DisplayCurveFitTest.h b/qt/scientific_interfaces/DynamicPDF/DisplayCurveFitTest.h deleted file mode 100644 index 86e5d8ed7c48fb3f29481bbdcbbe9d4a7140b424..0000000000000000000000000000000000000000 --- a/qt/scientific_interfaces/DynamicPDF/DisplayCurveFitTest.h +++ /dev/null @@ -1,65 +0,0 @@ -#ifndef MANTIDQTCUSTOMINTERFACES_DYNAMICPDF_DISPLAYCURVEFITTEST_H_ -#define MANTIDQTCUSTOMINTERFACES_DYNAMICPDF_DISPLAYCURVEFITTEST_H_ - -// includes for interace functionailty -#include "DllConfig.h" -#include "MantidQtWidgets/Common/UserSubWindow.h" -#include "ui_DisplayCurveFitTest.h" - -namespace MantidQt { -namespace CustomInterfaces { -namespace DynamicPDF { - -/** An interface whose only purpose is to test widget DisplayCurveFit - The interface is visible in MantidPlot only when compiled in Debug mode. - - @date 2016-02-22 - - Copyright © 2012 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge - National Laboratory & European Spallation Source - - This file is part of Mantid. - - Mantid is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3 of the License, or - (at your option) any later version. - - Mantid is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see <http://www.gnu.org/licenses/>. 
- - File change history is stored at: <https://github.com/mantidproject/mantid> - Code Documentation is available at: <http://doxygen.mantidproject.org> -*/ -class MANTIDQT_DYNAMICPDF_DLL DisplayCurveFitTest - : public MantidQt::API::UserSubWindow { - Q_OBJECT - -public: - /// The name of the interface as registered into the factory - static std::string name() { return "Test the DisplayCurveFit widget"; } - // This interface's categories. - static QString categoryInfo() { return "DynamicPDF"; } - - DisplayCurveFitTest(QWidget *parent = nullptr); - ~DisplayCurveFitTest() override; - -private slots: - void loadSpectra(const QString &workspaceName); - -private: - void initLayout() override; - /// The object containing the widgets defined in the form created in Qt - /// Designer - Ui::DisplayCurveFitTest m_uiForm; - -}; // class DisplayCurveFitTest -} // namespace CustomInterfaces -} // namespace DynamicPDF -} // namespace MantidQt -#endif // MANTIDQTCUSTOMINTERFACES_DYNAMICPDF_DISPLAYCURVEFITTEST_H_ diff --git a/qt/scientific_interfaces/DynamicPDF/DisplayCurveFitTest.ui b/qt/scientific_interfaces/DynamicPDF/DisplayCurveFitTest.ui deleted file mode 100644 index b88a6e54bc845f60c557fbf95deee141757016c2..0000000000000000000000000000000000000000 --- a/qt/scientific_interfaces/DynamicPDF/DisplayCurveFitTest.ui +++ /dev/null @@ -1,79 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<ui version="4.0"> - <class>DisplayCurveFitTest</class> - <widget class="QMainWindow" name="DisplayCurveFitTest"> - <property name="geometry"> - <rect> - <x>0</x> - <y>0</y> - <width>613</width> - <height>740</height> - </rect> - </property> - <property name="sizePolicy"> - <sizepolicy hsizetype="Preferred" vsizetype="Preferred"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="windowTitle"> - <string>Dynamic PDF - Test DisplayCurveFit</string> - </property> - <widget class="QWidget" name="centralwidget"> - <property name="sizePolicy"> - 
<sizepolicy hsizetype="Preferred" vsizetype="Expanding"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <layout class="QVBoxLayout" name="verticalLayout"> - <item> - <widget class="MantidQt::MantidWidgets::DataSelector" name="dataSelector"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Preferred" vsizetype="Minimum"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - </widget> - </item> - <item> - <widget class="MantidQt::MantidWidgets::DisplayCurveFit" name="displayFit"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Preferred" vsizetype="Expanding"> - <horstretch>0</horstretch> - <verstretch>1</verstretch> - </sizepolicy> - </property> - </widget> - </item> - <item> - <layout class="QHBoxLayout" name="horizontalLayout"> - <item> - <widget class="QCheckBox" name="updateRangeSelectors"> - <property name="text"> - <string>Update range selectors?</string> - </property> - </widget> - </item> - </layout> - </item> - </layout> - </widget> - </widget> - <customwidgets> - <customwidget> - <class>MantidQt::MantidWidgets::DataSelector</class> - <extends>QWidget</extends> - <header>MantidQtWidgets/Common/DataSelector.h</header> - </customwidget> - <customwidget> - <class>MantidQt::MantidWidgets::DisplayCurveFit</class> - <extends>QWidget</extends> - <header>MantidQtWidgets/LegacyQwt/DisplayCurveFit.h</header> - <container>1</container> - </customwidget> - </customwidgets> - <resources/> - <connections/> -</ui> diff --git a/qt/scientific_interfaces/EnggDiffraction/CMakeLists.txt b/qt/scientific_interfaces/EnggDiffraction/CMakeLists.txt index 985648af0074a208841bb5256b44c65d0f60990f..9981de062e1f80f82a29d8fb2b871eee08abcf4c 100644 --- a/qt/scientific_interfaces/EnggDiffraction/CMakeLists.txt +++ b/qt/scientific_interfaces/EnggDiffraction/CMakeLists.txt @@ -1,4 +1,5 @@ set ( SRC_FILES + EnggDiffFittingModel.cpp EnggDiffFittingPresenter.cpp EnggDiffFittingViewQtWidget.cpp 
EnggDiffractionPresenter.cpp @@ -9,6 +10,7 @@ set ( SRC_FILES # IMPORTANT: Include files are required in the MOC_FILES set. Scroll down to find it. set ( INC_FILES EnggDiffCalibSettings.h + EnggDiffFittingModel.h EnggDiffFittingPresWorker.h EnggDiffFittingPresenter.h EnggDiffFittingViewQtWidget.h @@ -21,6 +23,7 @@ set ( INC_FILES ) set ( MOC_FILES + EnggDiffFittingModel.h EnggDiffFittingPresenter.h EnggDiffFittingPresWorker.h EnggDiffFittingViewQtWidget.h @@ -67,7 +70,3 @@ mtd_add_qt_library (TARGET_NAME MantidScientificInterfacesEnggDiffraction @loader_path/../../plugins/qt4 ) -if ( MSVC_IDE ) -# # Add to the 'ScientificInterfaces' group in VS - set_property ( TARGET MantidScientificInterfacesEnggDiffractionQt4 PROPERTY FOLDER "ScientificInterfaces" ) -endif() diff --git a/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingModel.cpp b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingModel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6c9cd5cedc9430afbbe35531f3efaf034bc8f7be --- /dev/null +++ b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingModel.cpp @@ -0,0 +1,133 @@ +#include "EnggDiffFittingModel.h" + +#include "MantidAPI/AlgorithmManager.h" +#include "MantidAPI/AnalysisDataService.h" +#include "MantidAPI/MatrixWorkspace.h" +#include "MantidAPI/MatrixWorkspace_fwd.h" +#include "MantidAPI/Run.h" +#include "MantidAPI/WorkspaceGroup.h" +#include "MantidKernel/PropertyWithValue.h" + +#include <algorithm> +#include <numeric> + +using namespace Mantid; + +namespace { + +template <typename T> void insertInOrder(const T &item, std::vector<T> &vec) { + vec.insert(std::upper_bound(vec.begin(), vec.end(), item), item); +} + +bool isDigit(const std::string &text) { + return std::all_of(text.cbegin(), text.cend(), ::isdigit); +} + +} // anonymous namespace + +namespace MantidQT { +namespace CustomInterfaces { + +void EnggDiffFittingModel::addWorkspace(const int runNumber, const size_t bank, + const API::MatrixWorkspace_sptr ws) 
{ + m_wsMap[bank - 1][runNumber] = ws; +} + +API::MatrixWorkspace_sptr +EnggDiffFittingModel::getWorkspace(const int runNumber, const size_t bank) { + if (bank < 1 || bank > m_wsMap.size()) { + return nullptr; + } + if (m_wsMap[bank - 1].find(runNumber) == m_wsMap[bank - 1].end()) { + return nullptr; + } + return m_wsMap[bank - 1][runNumber]; +} + +std::vector<int> EnggDiffFittingModel::getAllRunNumbers() const { + std::vector<int> runNumbers; + + for (const auto &workspaces : m_wsMap) { + for (const auto &kvPair : workspaces) { + const auto runNumber = kvPair.first; + if (std::find(runNumbers.begin(), runNumbers.end(), runNumber) == + runNumbers.end()) { + insertInOrder(runNumber, runNumbers); + } + } + } + + return runNumbers; +} + +void EnggDiffFittingModel::loadWorkspaces(const std::string &filename) { + auto loadAlg = API::AlgorithmManager::Instance().create("Load"); + loadAlg->initialize(); + + loadAlg->setPropertyValue("Filename", filename); + loadAlg->setPropertyValue("OutputWorkspace", FOCUSED_WS_NAME); + loadAlg->execute(); + + API::AnalysisDataServiceImpl &ADS = API::AnalysisDataService::Instance(); + if (filename.find(",") == std::string::npos) { // Only 1 run loaded + const auto ws = ADS.retrieveWS<API::MatrixWorkspace>(FOCUSED_WS_NAME); + addWorkspace(ws->getRunNumber(), guessBankID(ws), ws); + } else { + const auto group_ws = ADS.retrieveWS<API::WorkspaceGroup>(FOCUSED_WS_NAME); + for (auto iter = group_ws->begin(); iter != group_ws->end(); ++iter) { + const auto ws = boost::dynamic_pointer_cast<API::MatrixWorkspace>(*iter); + addWorkspace(ws->getRunNumber(), guessBankID(ws), ws); + } + } +} + +std::vector<std::pair<int, size_t>> +EnggDiffFittingModel::getRunNumbersAndBanksIDs() { + std::vector<std::pair<int, size_t>> pairs; + + const auto runNumbers = getAllRunNumbers(); + for (const auto runNumber : runNumbers) { + for (size_t i = 0; i < m_wsMap.size(); ++i) { + if (m_wsMap[i].find(runNumber) != m_wsMap[i].end()) { + pairs.push_back(std::pair<int, 
size_t>(runNumber, i + 1)); + } + } + } + return pairs; +} + +size_t +EnggDiffFittingModel::guessBankID(API::MatrixWorkspace_const_sptr ws) const { + if (ws->run().hasProperty("bankid")) { + const auto log = dynamic_cast<Kernel::PropertyWithValue<int> *>( + ws->run().getLogData("bankid")); + return boost::lexical_cast<size_t>(log->value()); + } + + // couldn't get it from sample logs - try using the old naming convention + auto name = ws->getName(); + std::vector<std::string> chunks; + boost::split(chunks, name, boost::is_any_of("_")); + bool isNum = isDigit(chunks.back()); + if (!chunks.empty() && isNum) { + try { + return boost::lexical_cast<size_t>(chunks.back()); + } catch (boost::exception &) { + // If we get a bad cast or something goes wrong then + // the file is probably not what we were expecting + // so throw a runtime error + throw std::runtime_error( + "Failed to fit file: The data was not what is expected. " + "Does the file contain a focused workspace?"); + } + } + + throw std::runtime_error("Could not guess run number from input workspace. 
" + "Are you sure it has been focused correctly?"); +} + +const std::string EnggDiffFittingModel::FOCUSED_WS_NAME = + "engggui_fitting_focused_ws"; + +} // namespace CustomInterfaces +} // namespace MantidQT diff --git a/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingModel.h b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingModel.h new file mode 100644 index 0000000000000000000000000000000000000000..9993fa1315b0b009a8e8ff0a20f0847a5cb082e6 --- /dev/null +++ b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingModel.h @@ -0,0 +1,38 @@ +#ifndef MANTIDQTCUSTOMINTERFACES_ENGGDIFFRACTION_ENGGDIFFFITTINGMODEL_H_ +#define MANTIDQTCUSTOMINTERFACES_ENGGDIFFRACTION_ENGGDIFFFITTINGMODEL_H_ + +#include "DllConfig.h" +#include "MantidAPI/MatrixWorkspace_fwd.h" + +#include <array> +#include <unordered_map> +#include <vector> + +using namespace Mantid; + +namespace MantidQT { +namespace CustomInterfaces { + +class MANTIDQT_ENGGDIFFRACTION_DLL EnggDiffFittingModel { +public: + API::MatrixWorkspace_sptr getWorkspace(const int runNumber, + const size_t bank); + std::vector<int> getAllRunNumbers() const; + void loadWorkspaces(const std::string &filename); + std::vector<std::pair<int, size_t>> getRunNumbersAndBanksIDs(); + void addWorkspace(const int runNumber, const size_t bank, + const API::MatrixWorkspace_sptr ws); + +private: + static const size_t MAX_BANKS = 2; + static const std::string FOCUSED_WS_NAME; + std::array<std::unordered_map<int, API::MatrixWorkspace_sptr>, MAX_BANKS> + m_wsMap; + + size_t guessBankID(API::MatrixWorkspace_const_sptr) const; +}; + +} // namespace CustomInterfaces +} // namespace MantidQT + +#endif // MANTIDQTCUSTOMINTERFACES_ENGGDIFFRACTION_ENGGDIFFFITTINGMODEL_H_ diff --git a/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingPresenter.cpp b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingPresenter.cpp index 09808dcdff316a764bf5b3de387bdfb1784ce812..385f141bdac433f4b9e83598548e44aad63f594a 100644 --- 
a/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingPresenter.cpp +++ b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingPresenter.cpp @@ -1,6 +1,7 @@ #include "EnggDiffFittingPresenter.h" #include "MantidAPI/AlgorithmManager.h" #include "MantidAPI/AnalysisDataService.h" +#include "MantidAPI/WorkspaceGroup.h" #include "MantidAPI/ITableWorkspace.h" #include "MantidAPI/MatrixWorkspace.h" #include "MantidAPI/Run.h" @@ -9,6 +10,7 @@ #include "MantidQtWidgets/LegacyQwt/QwtHelper.h" #include "EnggDiffFittingPresWorker.h" +#include <boost/algorithm/string.hpp> #include <boost/lexical_cast.hpp> #include <cctype> #include <fstream> @@ -29,6 +31,7 @@ Mantid::Kernel::Logger g_log("EngineeringDiffractionGUI"); const bool EnggDiffFittingPresenter::g_useAlignDetectors = true; +// MOVE THIS TO THE MODEL const std::string EnggDiffFittingPresenter::g_focusedFittingWSName = "engggui_fitting_focused_ws"; @@ -124,6 +127,10 @@ void EnggDiffFittingPresenter::notify( case IEnggDiffFittingPresenter::LogMsg: processLogMsg(); break; + + case IEnggDiffFittingPresenter::selectRun: + processSelectRun(); + break; } } @@ -345,6 +352,16 @@ std::vector<std::string> EnggDiffFittingPresenter::processFullPathInput( return foundFullFilePaths; } +void EnggDiffFittingPresenter::processSelectRun() { + const auto workspaceID = m_view->getFittingListWidgetCurrentValue(); + std::vector<std::string> tokens; + + boost::split(tokens, workspaceID, boost::is_any_of("_")); + const auto ws = + m_model.getWorkspace(std::stoi(tokens[0]), std::stoi(tokens[1])); + plotFocusedFile(false, ws); +} + /** * Takes the full path of a file which has been selected through * browse, the run number the user has input and stores the @@ -703,40 +720,60 @@ EnggDiffFittingPresenter::enableMultiRun(const std::string &firstRun, void EnggDiffFittingPresenter::processStart() {} -void EnggDiffFittingPresenter::processLoad() { - // while file text-area is not empty - // while directory vector is not empty - // if loaded here 
set a global variable true so doesnt load again? - - try { - MatrixWorkspace_sptr focusedWS; - const std::string focusedFile = m_view->getFittingRunNo(); - Poco::Path selectedfPath(focusedFile); +size_t EnggDiffFittingPresenter::findBankID( + Mantid::API::MatrixWorkspace_sptr ws) const { + // MOVE THIS TO THE MODEL + size_t bankID = 1; - if (!focusedFile.empty() && selectedfPath.isFile()) { - runLoadAlg(focusedFile, focusedWS); - setDifcTzero(focusedWS); - convertUnits(g_focusedFittingWSName); - plotFocusedFile(false); + auto name = ws->getName(); + std::vector<std::string> chunks; + boost::split(chunks, name, boost::is_any_of("_")); + bool isNum = isDigit(chunks.back()); + if (!chunks.empty() && isNum) { + try { + bankID = boost::lexical_cast<size_t>(chunks.back()); + } catch (boost::exception &) { + // If we get a bad cast or something goes wrong then + // the file is probably not what we were expecting + // so throw a runtime error + throw std::runtime_error( + "Failed to fit file: The data was not what is expected. " + "Does the file contain focused " + + m_view->getCurrentInstrument() + " workspace?"); + } + } + return bankID; +} - m_view->showStatus( - "Focused file loaded! (Click 'Select " - "Peak' to activate peak picker tool, hold Shift + Click " - "Peak, Click 'Add Peak')"); +void EnggDiffFittingPresenter::processLoad() { + const std::string filenames = m_view->getFittingRunNo(); - } else { - m_view->userWarning("No File Found", - "Please select a focused file to load"); - m_view->showStatus("Error while plotting the focused workspace"); - } - } catch (std::invalid_argument &ia) { - m_view->userWarning( - "Failed to load the selected focus file", - "The focus file failed to load, please check the logger for more" - " information."); - g_log.error("Failed to load file. 
Error message: "); - g_log.error(ia.what()); + try { + m_model.loadWorkspaces(filenames); + } catch (Poco::PathSyntaxException &ex) { + warnFileNotFound(ex); + return; + } catch (std::invalid_argument &ex) { + warnFileNotFound(ex); + return; + } catch (Mantid::Kernel::Exception::NotFoundError &ex) { + warnFileNotFound(ex); + return; } + + const auto runNoBankPairs = m_model.getRunNumbersAndBanksIDs(); + std::vector<std::string> workspaceIDs; + std::transform( + runNoBankPairs.begin(), runNoBankPairs.end(), + std::back_inserter(workspaceIDs), [](const std::pair<int, size_t> &pair) { + return std::to_string(pair.first) + "_" + std::to_string(pair.second); + }); + m_view->enableFittingListWidget(true); + m_view->clearFittingListWidget(); + std::for_each(workspaceIDs.begin(), workspaceIDs.end(), + [&](const std::string &workspaceID) { + m_view->addRunNoItem(workspaceID); + }); } void EnggDiffFittingPresenter::processShutDown() { @@ -921,31 +958,7 @@ std::string EnggDiffFittingPresenter::validateFittingexpectedPeaks( } void EnggDiffFittingPresenter::setDifcTzero(MatrixWorkspace_sptr wks) const { - size_t bankID = 1; - // attempt to guess bankID - this should be done in code that is currently - // in the view - auto fittingFilename = m_view->getFittingRunNo(); - Poco::File fittingFile(fittingFilename); - if (fittingFile.exists()) { - Poco::Path path(fittingFile.path()); - auto name = path.getBaseName(); - std::vector<std::string> chunks; - boost::split(chunks, name, boost::is_any_of("_")); - bool isNum = isDigit(chunks.back()); - if (!chunks.empty() && isNum) { - try { - bankID = boost::lexical_cast<size_t>(chunks.back()); - } catch (boost::exception &) { - // If we get a bad cast or something goes wrong then - // the file is probably not what we were expecting - // so throw a runtime error - throw std::runtime_error( - "Failed to fit file: The data was not what is expected. 
" - "Does the file contain focused " + - m_view->getCurrentInstrument() + " workspace?"); - } - } - } + const auto bankID = findBankID(wks); const std::string units = "none"; auto &run = wks->mutableRun(); @@ -1714,19 +1727,20 @@ bool EnggDiffFittingPresenter::isDigit(const std::string &text) const { return std::all_of(text.cbegin(), text.cend(), ::isdigit); } -void EnggDiffFittingPresenter::plotFocusedFile(bool plotSinglePeaks) { - AnalysisDataServiceImpl &ADS = Mantid::API::AnalysisDataService::Instance(); +void EnggDiffFittingPresenter::warnFileNotFound(const std::exception &ex) { + m_view->showStatus("Error while loading focused run"); + m_view->userWarning("Invalid file selected", + "Mantid could not load the selected file. " + "Are you sure it exists? " + "See the logger for more information"); + g_log.error("Failed to load file. Error message: "); + g_log.error(ex.what()); +} - if (!ADS.doesExist(g_focusedFittingWSName)) { - g_log.error() << "Focused workspace could not be plotted as there is no " + - g_focusedFittingWSName + " workspace found.\n"; - m_view->showStatus("Error while plotting focused workspace"); - return; - } +void EnggDiffFittingPresenter::plotFocusedFile( + bool plotSinglePeaks, MatrixWorkspace_sptr focusedPeaksWS) { try { - auto focusedPeaksWS = - ADS.retrieveWS<MatrixWorkspace>(g_focusedFittingWSName); auto focusedData = QwtHelper::curveDataFromWs(focusedPeaksWS); // Check that the number of curves to plot isn't excessive @@ -1774,7 +1788,9 @@ void EnggDiffFittingPresenter::plotFitPeaksCurves() { m_view->resetCanvas(); // plots focused workspace - plotFocusedFile(m_fittingFinishedOK); + throw new std::runtime_error("Plotting fit not yet implemented"); + // TODO: sort out what to do here + // plotFocusedFile(m_fittingFinishedOK); if (m_fittingFinishedOK) { g_log.debug() << "single peaks fitting being plotted now.\n"; diff --git a/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingPresenter.h 
b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingPresenter.h index 16a27d250fd3b5584acff3ed512af6568b773d33..a0736a7ff9c3ad763c8f6214084b5b7935e8e6cb 100644 --- a/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingPresenter.h +++ b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingPresenter.h @@ -4,6 +4,7 @@ #include "MantidAPI/ITableWorkspace_fwd.h" #include "MantidAPI/MatrixWorkspace_fwd.h" #include "DllConfig.h" +#include "EnggDiffFittingModel.h" #include "IEnggDiffFittingPresenter.h" #include "IEnggDiffFittingView.h" #include "IEnggDiffractionCalibration.h" @@ -87,7 +88,8 @@ public: std::string tableName, size_t row, std::string &startX, std::string &endX); - void plotFocusedFile(bool plotSinglePeaks); + void plotFocusedFile(bool plotSinglePeaks, + Mantid::API::MatrixWorkspace_sptr focusedPeaksWS); void plotFitPeaksCurves(); @@ -145,8 +147,12 @@ protected slots: void fittingRunNoChanged(); private: + size_t findBankID(Mantid::API::MatrixWorkspace_sptr ws) const; + bool isDigit(const std::string &text) const; + void warnFileNotFound(const std::exception &ex); + // Methods related single peak fits virtual void startAsyncFittingWorker(const std::vector<std::string> &focusedRunNo, @@ -225,8 +231,14 @@ private: /// Associated view for this presenter (MVP pattern) IEnggDiffFittingView *const m_view; + /// Associated model for this presenter + MantidQT::CustomInterfaces::EnggDiffFittingModel m_model; + /// Holds if the view is in the process of being closed bool m_viewHasClosed; + + /// Handle the user selecting a different run to plot + void processSelectRun(); }; } // namespace CustomInterfaces diff --git a/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingViewQtWidget.cpp b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingViewQtWidget.cpp index a24d6a2e1ce8424d48f94a57ced94c48f010caba..cc00e278ccb5ce34fd2ebd59f4a8ae533ba2e82b 100644 --- a/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingViewQtWidget.cpp +++ 
b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingViewQtWidget.cpp @@ -86,11 +86,8 @@ void EnggDiffFittingViewQtWidget::doSetup() { connect(m_ui.lineEdit_pushButton_run_num, SIGNAL(textEdited(const QString &)), this, SLOT(resetFittingMode())); - connect(m_ui.lineEdit_pushButton_run_num, SIGNAL(editingFinished()), this, - SLOT(FittingRunNo())); - connect(m_ui.lineEdit_pushButton_run_num, SIGNAL(returnPressed()), this, - SLOT(FittingRunNo())); + SLOT(loadClicked())); connect(this, SIGNAL(getBanks()), this, SLOT(FittingRunNo())); @@ -126,6 +123,10 @@ void EnggDiffFittingViewQtWidget::doSetup() { connect(m_ui.pushButton_plot_separate_window, SIGNAL(released()), SLOT(plotSeparateWindow())); + connect(m_ui.listWidget_fitting_run_num, + SIGNAL(itemClicked(QListWidgetItem *)), this, + SLOT(listWidget_fitting_run_num_clicked(QListWidgetItem *))); + // Tool-tip button connect(m_ui.pushButton_tooltip, SIGNAL(released()), SLOT(showToolTipHelp())); @@ -276,6 +277,12 @@ void EnggDiffFittingViewQtWidget::listViewFittingRun() { } } +void EnggDiffFittingViewQtWidget::listWidget_fitting_run_num_clicked( + QListWidgetItem *clickedItem) { + const auto label = clickedItem->text(); + m_presenter->notify(IEnggDiffFittingPresenter::selectRun); +} + void EnggDiffFittingViewQtWidget::resetFittingMode() { // resets the global variable so the list view widgets // adds the run number to for single runs too @@ -459,17 +466,17 @@ void EnggDiffFittingViewQtWidget::browseFitFocusedRun() { std::string nexusFormat = "Nexus file with calibration table: NXS, NEXUS" "(*.nxs *.nexus);;"; - QString path( - QFileDialog::getOpenFileName(this, tr("Open Focused File "), prevPath, - QString::fromStdString(nexusFormat))); + QStringList paths( + QFileDialog::getOpenFileNames(this, tr("Open Focused File "), prevPath, + QString::fromStdString(nexusFormat))); - if (path.isEmpty()) { + if (paths.isEmpty()) { return; } - MantidQt::API::AlgorithmInputHistory::Instance().setPreviousDirectory(path); - 
setFittingRunNo(path.toStdString()); - getBanks(); + // MantidQt::API::AlgorithmInputHistory::Instance().setPreviousDirectory(paths[0]); + setFittingRunNo(paths.join(",").toStdString()); + // getBanks(); } void EnggDiffFittingViewQtWidget::setFittingRunNo(const std::string &path) { @@ -504,6 +511,11 @@ int EnggDiffFittingViewQtWidget::getFittingListWidgetCurrentRow() const { return m_ui.listWidget_fitting_run_num->currentRow(); } +std::string +EnggDiffFittingViewQtWidget::getFittingListWidgetCurrentValue() const { + return m_ui.listWidget_fitting_run_num->currentItem()->text().toStdString(); +} + void EnggDiffFittingViewQtWidget::setFittingListWidgetCurrentRow( int idx) const { m_ui.listWidget_fitting_run_num->setCurrentRow(idx); diff --git a/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingViewQtWidget.h b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingViewQtWidget.h index 10c6c31e17dda70ac1e9017f930301e5cd783d00..3cfeef3885e8f1ec32f39b41fac008790c330d2a 100644 --- a/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingViewQtWidget.h +++ b/qt/scientific_interfaces/EnggDiffraction/EnggDiffFittingViewQtWidget.h @@ -107,6 +107,8 @@ public: int getFittingListWidgetCurrentRow() const override; + std::string getFittingListWidgetCurrentValue() const override; + void setFittingListWidgetCurrentRow(int idx) const override; std::string fittingPeaksData() const override; @@ -190,6 +192,7 @@ private slots: void showToolTipHelp(); void setBankDir(int idx); void listViewFittingRun(); + void listWidget_fitting_run_num_clicked(QListWidgetItem *listWidget); private: /// Setup the interface (tab UI) diff --git a/qt/scientific_interfaces/EnggDiffraction/EnggDiffractionQtTabFitting.ui b/qt/scientific_interfaces/EnggDiffraction/EnggDiffractionQtTabFitting.ui index 6ed8a033782e74f7c3703f23581ba38e5c86d190..7c50ba5e7e6a7d6f0dfc577b33d0a77bd92943df 100644 --- a/qt/scientific_interfaces/EnggDiffraction/EnggDiffractionQtTabFitting.ui +++ 
b/qt/scientific_interfaces/EnggDiffraction/EnggDiffractionQtTabFitting.ui @@ -373,7 +373,7 @@ </property> <property name="maximumSize"> <size> - <width>50</width> + <width>60</width> <height>16777215</height> </size> </property> diff --git a/qt/scientific_interfaces/EnggDiffraction/IEnggDiffFittingPresenter.h b/qt/scientific_interfaces/EnggDiffraction/IEnggDiffFittingPresenter.h index 11f2a885682c810a1d456d34545755602453017a..8c5d25fd1c42f84a9168994e56e7faf43188ea6b 100644 --- a/qt/scientific_interfaces/EnggDiffraction/IEnggDiffFittingPresenter.h +++ b/qt/scientific_interfaces/EnggDiffraction/IEnggDiffFittingPresenter.h @@ -52,6 +52,7 @@ public: savePeaks, ///< Save the peaks list ShutDown, ///< closing the interface LogMsg, ///< need to send a message to the Mantid log system + selectRun, ///< update plot with new run selected from list widget }; /** diff --git a/qt/scientific_interfaces/EnggDiffraction/IEnggDiffFittingView.h b/qt/scientific_interfaces/EnggDiffraction/IEnggDiffFittingView.h index c1638eaeae7dda02b3dc830d457db61081f731d0..8fe38b27bc9a9a06f2deb04a59e78a1c6d96d857 100644 --- a/qt/scientific_interfaces/EnggDiffraction/IEnggDiffFittingView.h +++ b/qt/scientific_interfaces/EnggDiffraction/IEnggDiffFittingView.h @@ -191,6 +191,11 @@ public: */ virtual int getFittingListWidgetCurrentRow() const = 0; + /** + * @return The text on the current selected row of the list widget + */ + virtual std::string getFittingListWidgetCurrentValue() const = 0; + /** * Sets the current row of the fitting list widget * diff --git a/qt/scientific_interfaces/General/CMakeLists.txt b/qt/scientific_interfaces/General/CMakeLists.txt index 04d87b24ddcc6c48e016bd5335525204ae615e2d..527a2876ce5f98bce4aa6e55f04ebd0a5d916ddc 100644 --- a/qt/scientific_interfaces/General/CMakeLists.txt +++ b/qt/scientific_interfaces/General/CMakeLists.txt @@ -63,8 +63,3 @@ mtd_add_qt_library (TARGET_NAME MantidScientificInterfacesGeneral OSX_INSTALL_RPATH @loader_path/../../Contents/MacOS ) - -if ( 
MSVC_IDE ) -# # Add to the 'ScientificInterfaces' group in VS - set_property ( TARGET MantidScientificInterfacesGeneralQt4 PROPERTY FOLDER "ScientificInterfaces" ) -endif() diff --git a/qt/scientific_interfaces/ISISReflectometry/CMakeLists.txt b/qt/scientific_interfaces/ISISReflectometry/CMakeLists.txt index a08497c63bf51f2237ccaf46e24d2f2b81f8d3e7..7f418e9aa0c0f1c439a9b18e53fccc6c2269d79c 100644 --- a/qt/scientific_interfaces/ISISReflectometry/CMakeLists.txt +++ b/qt/scientific_interfaces/ISISReflectometry/CMakeLists.txt @@ -120,8 +120,3 @@ mtd_add_qt_library (TARGET_NAME MantidScientificInterfacesISISReflectometry OSX_INSTALL_RPATH @loader_path/../../Contents/MacOS ) - -if ( MSVC_IDE ) -# # Add to the 'ScientificInterfaces' group in VS - set_property ( TARGET MantidScientificInterfacesISISReflectometryQt4 PROPERTY FOLDER "ScientificInterfaces" ) -endif() diff --git a/qt/scientific_interfaces/ISISSANS/CMakeLists.txt b/qt/scientific_interfaces/ISISSANS/CMakeLists.txt index ca813efd77391f7d4db591050bcb8eff9231b7ca..1a1ef18afcddebbfe3cf7e06da196adb6750f509 100644 --- a/qt/scientific_interfaces/ISISSANS/CMakeLists.txt +++ b/qt/scientific_interfaces/ISISSANS/CMakeLists.txt @@ -67,8 +67,3 @@ mtd_add_qt_library (TARGET_NAME MantidScientificInterfacesISISSANS @loader_path/../../Contents/MacOS @loader_path/../../plugins/qt4 ) - -if ( MSVC_IDE ) -# # Add to the 'ScientificInterfaces' group in VS - set_property ( TARGET MantidScientificInterfacesISISSANSQt4 PROPERTY FOLDER "ScientificInterfaces" ) -endif() diff --git a/qt/scientific_interfaces/Indirect/CMakeLists.txt b/qt/scientific_interfaces/Indirect/CMakeLists.txt index d65cae357e47891e037a1e97f795a0b36a4e8ef3..500aef356aab3ee169534937fd53f3bc39859357 100644 --- a/qt/scientific_interfaces/Indirect/CMakeLists.txt +++ b/qt/scientific_interfaces/Indirect/CMakeLists.txt @@ -194,8 +194,3 @@ mtd_add_qt_library (TARGET_NAME MantidScientificInterfacesIndirect @loader_path/../../Contents/MacOS @loader_path/../../plugins/qt4 ) - 
-if ( MSVC_IDE ) -# # Add to the 'ScientificInterfaces' group in VS - set_property ( TARGET MantidScientificInterfacesIndirectQt4 PROPERTY FOLDER "ScientificInterfaces" ) -endif() diff --git a/qt/scientific_interfaces/Indirect/ConvFit.ui b/qt/scientific_interfaces/Indirect/ConvFit.ui index dd727097bc94b46b99a5f8509bddf7894f99d17e..5c02c806133413222abe20f7396116158059ce76 100644 --- a/qt/scientific_interfaces/Indirect/ConvFit.ui +++ b/qt/scientific_interfaces/Indirect/ConvFit.ui @@ -6,8 +6,8 @@ <rect> <x>0</x> <y>0</y> - <width>963</width> - <height>679</height> + <width>566</width> + <height>447</height> </rect> </property> <property name="windowTitle"> @@ -117,8 +117,14 @@ </item> <item> <layout class="QHBoxLayout" name="loPlotArea"> + <property name="sizeConstraint"> + <enum>QLayout::SetNoConstraint</enum> + </property> <item> <layout class="QVBoxLayout" name="loProperties"> + <property name="sizeConstraint"> + <enum>QLayout::SetNoConstraint</enum> + </property> <item> <layout class="QVBoxLayout" name="loOptions"> <item> @@ -254,42 +260,90 @@ </layout> </item> <item> - <layout class="QVBoxLayout" name="properties"/> + <layout class="QVBoxLayout" name="properties"> + <property name="sizeConstraint"> + <enum>QLayout::SetNoConstraint</enum> + </property> + </layout> </item> </layout> </item> <item> <layout class="QVBoxLayout" name="loMiniPlot"> + <property name="sizeConstraint"> + <enum>QLayout::SetNoConstraint</enum> + </property> <item> - <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotTop" native="true"> - <property name="showLegend" stdset="0"> - <bool>true</bool> - </property> - <property name="canvasColour" stdset="0"> - <color> - <red>255</red> - <green>255</green> - <blue>255</blue> - </color> - </property> - </widget> - </item> - <item> - <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotBottom" native="true"> - <property name="showLegend" stdset="0"> - <bool>true</bool> - </property> - <property name="canvasColour" 
stdset="0"> - <color> - <red>255</red> - <green>255</green> - <blue>255</blue> - </color> + <layout class="QVBoxLayout" name="loPlots" stretch="5,3"> + <property name="sizeConstraint"> + <enum>QLayout::SetDefaultConstraint</enum> </property> - </widget> + <item> + <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotTop" native="true"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="Expanding"> + <horstretch>0</horstretch> + <verstretch>1</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>125</height> + </size> + </property> + <property name="showLegend" stdset="0"> + <bool>true</bool> + </property> + <property name="canvasColour" stdset="0"> + <color> + <red>255</red> + <green>255</green> + <blue>255</blue> + </color> + </property> + </widget> + </item> + <item> + <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotBottom" native="true"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="Expanding"> + <horstretch>0</horstretch> + <verstretch>1</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>75</height> + </size> + </property> + <property name="maximumSize"> + <size> + <width>16777215</width> + <height>16777215</height> + </size> + </property> + <property name="showLegend" stdset="0"> + <bool>true</bool> + </property> + <property name="canvasColour" stdset="0"> + <color> + <red>255</red> + <green>255</green> + <blue>255</blue> + </color> + </property> + <zorder>ppPlotTop</zorder> + </widget> + </item> + </layout> </item> <item> <layout class="QHBoxLayout" name="loPlotOptions"> + <property name="sizeConstraint"> + <enum>QLayout::SetNoConstraint</enum> + </property> <item> <widget class="QPushButton" name="pbSingleFit"> <property name="text"> @@ -321,6 +375,9 @@ </item> <item> <layout class="QHBoxLayout" name="loSpectra"> + <property name="sizeConstraint"> + 
<enum>QLayout::SetNoConstraint</enum> + </property> <item> <widget class="QLabel" name="lbPlotSpectrum"> <property name="text"> diff --git a/qt/scientific_interfaces/Indirect/Elwin.ui b/qt/scientific_interfaces/Indirect/Elwin.ui index 084c09ec366e75e4849e3a914ef1d884fdd0c04e..e8d3b1379dea2b544fc6c2c36ae318b0291284c3 100644 --- a/qt/scientific_interfaces/Indirect/Elwin.ui +++ b/qt/scientific_interfaces/Indirect/Elwin.ui @@ -6,8 +6,8 @@ <rect> <x>0</x> <y>0</y> - <width>869</width> - <height>445</height> + <width>581</width> + <height>651</height> </rect> </property> <property name="windowTitle"> @@ -101,15 +101,44 @@ </layout> </item> <item> - <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlot" native="true"> - <property name="canvasColour" stdset="0"> - <color> - <red>255</red> - <green>255</green> - <blue>255</blue> - </color> - </property> - </widget> + <layout class="QVBoxLayout" name="loPlots" stretch="5,3"> + <item> + <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlot" native="true"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="MinimumExpanding"> + <horstretch>0</horstretch> + <verstretch>1</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>125</height> + </size> + </property> + <property name="canvasColour" stdset="0"> + <color> + <red>255</red> + <green>255</green> + <blue>255</blue> + </color> + </property> + </widget> + </item> + <item> + <spacer name="verticalSpacer"> + <property name="orientation"> + <enum>Qt::Vertical</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>20</width> + <height>40</height> + </size> + </property> + </spacer> + </item> + </layout> </item> </layout> </item> diff --git a/qt/scientific_interfaces/Indirect/IndirectDataAnalysis.ui b/qt/scientific_interfaces/Indirect/IndirectDataAnalysis.ui index ff15d9e28daecb2774c91d8159e394fab3713440..9229c657321f08fb6eb8aea25367fa2757dfba6a 
100644 --- a/qt/scientific_interfaces/Indirect/IndirectDataAnalysis.ui +++ b/qt/scientific_interfaces/Indirect/IndirectDataAnalysis.ui @@ -6,20 +6,50 @@ <rect> <x>0</x> <y>0</y> - <width>700</width> - <height>600</height> + <width>600</width> + <height>625</height> </rect> </property> + <property name="sizePolicy"> + <sizepolicy hsizetype="MinimumExpanding" vsizetype="MinimumExpanding"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>200</width> + <height>200</height> + </size> + </property> <property name="windowTitle"> <string>Indirect Data Analysis</string> </property> <widget class="QWidget" name="centralwidget"> + <property name="sizePolicy"> + <sizepolicy hsizetype="MinimumExpanding" vsizetype="MinimumExpanding"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>200</width> + <height>200</height> + </size> + </property> <layout class="QVBoxLayout" name="verticalLayout"> <item> <widget class="QTabWidget" name="twIDATabs"> <property name="enabled"> <bool>true</bool> </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>0</height> + </size> + </property> <property name="tabShape"> <enum>QTabWidget::Rounded</enum> </property> @@ -51,11 +81,11 @@ <string>ConvFit</string> </attribute> </widget> - <widget class="QWidget" name="tabJumpFit"> - <attribute name="title"> - <string>JumpFit</string> - </attribute> - </widget> + <widget class="QWidget" name="tabJumpFit"> + <attribute name="title"> + <string>JumpFit</string> + </attribute> + </widget> </widget> </item> <item> diff --git a/qt/scientific_interfaces/Indirect/Iqt.ui b/qt/scientific_interfaces/Indirect/Iqt.ui index d03a99747d25e0c7ce6a54065733ac6d2909dcd3..d63cb92889fb4149912ccf9edf59b1e5f45868fc 100644 --- a/qt/scientific_interfaces/Indirect/Iqt.ui +++ b/qt/scientific_interfaces/Indirect/Iqt.ui @@ -119,18 
+119,47 @@ <layout class="QVBoxLayout" name="properties"/> </item> <item> - <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlot" native="true"> - <property name="canvasColour" stdset="0"> - <color> - <red>255</red> - <green>255</green> - <blue>255</blue> - </color> - </property> - <property name="showLegend" stdset="0"> - <bool>true</bool> - </property> - </widget> + <layout class="QVBoxLayout" name="verticalLayout" stretch="5,3"> + <item> + <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlot" native="true"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="MinimumExpanding"> + <horstretch>0</horstretch> + <verstretch>1</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>125</height> + </size> + </property> + <property name="canvasColour" stdset="0"> + <color> + <red>255</red> + <green>255</green> + <blue>255</blue> + </color> + </property> + <property name="showLegend" stdset="0"> + <bool>true</bool> + </property> + </widget> + </item> + <item> + <spacer name="verticalSpacer"> + <property name="orientation"> + <enum>Qt::Vertical</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>20</width> + <height>40</height> + </size> + </property> + </spacer> + </item> + </layout> </item> </layout> </item> diff --git a/qt/scientific_interfaces/Indirect/IqtFit.ui b/qt/scientific_interfaces/Indirect/IqtFit.ui index 739c8c3058500ae4eae9ea5b180da678dcd3ba22..9a78efb73b132deca6ed3835ac27d62b09a94079 100644 --- a/qt/scientific_interfaces/Indirect/IqtFit.ui +++ b/qt/scientific_interfaces/Indirect/IqtFit.ui @@ -6,8 +6,8 @@ <rect> <x>0</x> <y>0</y> - <width>777</width> - <height>579</height> + <width>581</width> + <height>533</height> </rect> </property> <property name="windowTitle"> @@ -139,32 +139,60 @@ <item> <layout class="QVBoxLayout" name="verticalLayout_6"> <item> - <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotTop" 
native="true"> - <property name="showLegend" stdset="0"> - <bool>true</bool> - </property> - <property name="canvasColour" stdset="0"> - <color> - <red>255</red> - <green>255</green> - <blue>255</blue> - </color> - </property> - </widget> - </item> - <item> - <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotBottom" native="true"> - <property name="showLegend" stdset="0"> - <bool>true</bool> - </property> - <property name="canvasColour" stdset="0"> - <color> - <red>255</red> - <green>255</green> - <blue>255</blue> - </color> - </property> - </widget> + <layout class="QVBoxLayout" name="loPlots" stretch="5,3"> + <item> + <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotTop" native="true"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="MinimumExpanding"> + <horstretch>0</horstretch> + <verstretch>1</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>125</height> + </size> + </property> + <property name="showLegend" stdset="0"> + <bool>true</bool> + </property> + <property name="canvasColour" stdset="0"> + <color> + <red>255</red> + <green>255</green> + <blue>255</blue> + </color> + </property> + </widget> + </item> + <item> + <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotBottom" native="true"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="MinimumExpanding"> + <horstretch>0</horstretch> + <verstretch>1</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>75</height> + </size> + </property> + <property name="showLegend" stdset="0"> + <bool>true</bool> + </property> + <property name="canvasColour" stdset="0"> + <color> + <red>255</red> + <green>255</green> + <blue>255</blue> + </color> + </property> + </widget> + </item> + </layout> </item> <item> <layout class="QHBoxLayout" name="gpPlotOptions"> diff --git 
a/qt/scientific_interfaces/Indirect/JumpFit.ui b/qt/scientific_interfaces/Indirect/JumpFit.ui index 0337e8ce98ae9fabe84506e6d8badc5a014c6edc..e300b2913dca6cdb2be1f89551aa29c25dd2b052 100644 --- a/qt/scientific_interfaces/Indirect/JumpFit.ui +++ b/qt/scientific_interfaces/Indirect/JumpFit.ui @@ -120,9 +120,21 @@ <layout class="QVBoxLayout" name="treeSpace"/> </item> <item> - <layout class="QVBoxLayout" name="verticalLayout_2"> + <layout class="QVBoxLayout" name="loPlots" stretch="5,3"> <item> <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotTop" native="true"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="MinimumExpanding"> + <horstretch>0</horstretch> + <verstretch>1</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>125</height> + </size> + </property> <property name="canvasColour" stdset="0"> <color> <red>255</red> @@ -142,6 +154,18 @@ </item> <item> <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotBottom" native="true"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="MinimumExpanding"> + <horstretch>0</horstretch> + <verstretch>1</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>75</height> + </size> + </property> <property name="showLegend" stdset="0"> <bool>true</bool> </property> diff --git a/qt/scientific_interfaces/Indirect/MSDFit.ui b/qt/scientific_interfaces/Indirect/MSDFit.ui index 7500cdfb51f79b477ea927e0802042b5212c00b2..dfc78073f9d7549b9e2633ad2912a9cfede37194 100644 --- a/qt/scientific_interfaces/Indirect/MSDFit.ui +++ b/qt/scientific_interfaces/Indirect/MSDFit.ui @@ -6,8 +6,8 @@ <rect> <x>0</x> <y>0</y> - <width>949</width> - <height>734</height> + <width>522</width> + <height>571</height> </rect> </property> <property name="windowTitle"> @@ -75,9 +75,24 @@ </layout> </item> <item> - <layout class="QVBoxLayout" name="loPlotsLayout"> + 
<layout class="QVBoxLayout" name="loPlotsLayout" stretch="5,3"> + <property name="sizeConstraint"> + <enum>QLayout::SetNoConstraint</enum> + </property> <item> <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotTop" native="true"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="Expanding"> + <horstretch>0</horstretch> + <verstretch>1</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>125</height> + </size> + </property> <property name="showLegend" stdset="0"> <bool>true</bool> </property> @@ -97,6 +112,18 @@ </item> <item> <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlotBottom" native="true"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="Expanding"> + <horstretch>1</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>0</width> + <height>75</height> + </size> + </property> <property name="showLegend" stdset="0"> <bool>true</bool> </property> diff --git a/qt/scientific_interfaces/MultiDatasetFit/CMakeLists.txt b/qt/scientific_interfaces/MultiDatasetFit/CMakeLists.txt index d7cd8395f8dcaa453db20cec40519256a73e4901..9c403e3c37ba47be2ab5fa11c3dae6c84141bd3c 100644 --- a/qt/scientific_interfaces/MultiDatasetFit/CMakeLists.txt +++ b/qt/scientific_interfaces/MultiDatasetFit/CMakeLists.txt @@ -70,8 +70,3 @@ mtd_add_qt_library (TARGET_NAME MantidScientificInterfacesMultiDatasetFit OSX_INSTALL_RPATH @loader_path/../../Contents/MacOS ) - -if ( MSVC_IDE ) -# # Add to the 'ScientificInterfaces' group in VS - set_property ( TARGET MantidScientificInterfacesMultiDatasetFitQt4 PROPERTY FOLDER "ScientificInterfaces" ) -endif() diff --git a/qt/scientific_interfaces/Muon/CMakeLists.txt b/qt/scientific_interfaces/Muon/CMakeLists.txt index 7b796e65e03fef64e2cc1659d52c014fa2980158..f8c390d5e06514b6a07f916b1d043979315f19de 100644 --- 
a/qt/scientific_interfaces/Muon/CMakeLists.txt +++ b/qt/scientific_interfaces/Muon/CMakeLists.txt @@ -110,8 +110,3 @@ mtd_add_qt_library (TARGET_NAME MantidScientificInterfacesMuon @loader_path/../../Contents/MacOS @loader_path/../../plugins/qt4 ) - -if ( MSVC_IDE ) -# # Add to the 'ScientificInterfaces' group in VS - set_property ( TARGET MantidScientificInterfacesMuonQt4 PROPERTY FOLDER "ScientificInterfaces" ) -endif() diff --git a/qt/scientific_interfaces/test/EnggDiffFittingModelTest.h b/qt/scientific_interfaces/test/EnggDiffFittingModelTest.h new file mode 100644 index 0000000000000000000000000000000000000000..c9c4837854e491533172322acffa59099f8c24b4 --- /dev/null +++ b/qt/scientific_interfaces/test/EnggDiffFittingModelTest.h @@ -0,0 +1,86 @@ +#ifndef MANTID_CUSTOMINTERFACES_ENGGDIFFFITTINGMODELTEST_H_ +#define MANTID_CUSTOMINTERFACES_ENGGDIFFFITTINGMODELTEST_H_ + +#include <cxxtest/TestSuite.h> + +#include "MantidAPI/MatrixWorkspace.h" +#include "MantidAPI/WorkspaceFactory.h" + +#include "../EnggDiffraction/EnggDiffFittingModel.h" + +#include <vector> + +// Lets us have pairs inside assertion macros +typedef std::pair<int, size_t> RunBankPair; + +using namespace Mantid; +using namespace MantidQT::CustomInterfaces; + +namespace { + +void addSampleWorkspaceToModel(const int runNumber, const int bank, + EnggDiffFittingModel &model) { + API::MatrixWorkspace_sptr ws = + API::WorkspaceFactory::Instance().create("Workspace2D", 1, 10, 10); + model.addWorkspace(runNumber, bank, ws); +} + +} // anonymous namespace + +class EnggDiffFittingModelTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static EnggDiffFittingModelTest *createSuite() { + return new EnggDiffFittingModelTest(); + } + static void destroySuite(EnggDiffFittingModelTest *suite) { delete suite; } + + void test_addAndGetWorkspace() { + auto model = 
EnggDiffFittingModel(); + API::MatrixWorkspace_sptr ws = + API::WorkspaceFactory::Instance().create("Workspace2D", 1, 10, 10); + const int runNumber = 100; + const int bank = 1; + TS_ASSERT_THROWS_NOTHING(model.addWorkspace(runNumber, bank, ws)); + const auto retrievedWS = model.getWorkspace(runNumber, bank); + + TS_ASSERT(retrievedWS != nullptr); + TS_ASSERT_EQUALS(ws, retrievedWS); + } + + void test_getAllRunNumbers() { + auto model = EnggDiffFittingModel(); + + addSampleWorkspaceToModel(123, 1, model); + addSampleWorkspaceToModel(456, 2, model); + addSampleWorkspaceToModel(789, 1, model); + addSampleWorkspaceToModel(123, 2, model); + + const auto runNumbers = model.getAllRunNumbers(); + + TS_ASSERT_EQUALS(runNumbers.size(), 3); + TS_ASSERT_EQUALS(runNumbers[0], 123); + TS_ASSERT_EQUALS(runNumbers[1], 456); + TS_ASSERT_EQUALS(runNumbers[2], 789); + } + + void test_getRunNumbersAndBankIDs() { + auto model = EnggDiffFittingModel(); + + addSampleWorkspaceToModel(123, 1, model); + addSampleWorkspaceToModel(456, 2, model); + addSampleWorkspaceToModel(789, 1, model); + addSampleWorkspaceToModel(123, 2, model); + + const auto runNoBankPairs = model.getRunNumbersAndBanksIDs(); + + TS_ASSERT_EQUALS(runNoBankPairs.size(), 4); + TS_ASSERT_EQUALS(runNoBankPairs[0], RunBankPair(123, 1)); + TS_ASSERT_EQUALS(runNoBankPairs[1], RunBankPair(123, 2)); + TS_ASSERT_EQUALS(runNoBankPairs[2], RunBankPair(456, 2)); + TS_ASSERT_EQUALS(runNoBankPairs[3], RunBankPair(789, 1)); + } +}; + +#endif \ No newline at end of file diff --git a/qt/scientific_interfaces/test/EnggDiffFittingViewMock.h b/qt/scientific_interfaces/test/EnggDiffFittingViewMock.h index c244b4d9bfa2af4b9a52918c54cebe1e0aa398db..5356e30ee18b5aa36be7ecd517adf151fc3ddbc5 100644 --- a/qt/scientific_interfaces/test/EnggDiffFittingViewMock.h +++ b/qt/scientific_interfaces/test/EnggDiffFittingViewMock.h @@ -99,6 +99,9 @@ public: // sets the current row of the fitting list widget MOCK_CONST_METHOD1(setFittingListWidgetCurrentRow, 
void(int idx)); + // gets current value of the fitting list widget + MOCK_CONST_METHOD0(getFittingListWidgetCurrentValue, std::string()); + // sets the peak list according to the QString given MOCK_CONST_METHOD1(setPeakList, void(const std::string &peakList)); diff --git a/qt/widgets/legacyqwt/inc/MantidQtWidgets/LegacyQwt/PreviewPlot.ui b/qt/widgets/legacyqwt/inc/MantidQtWidgets/LegacyQwt/PreviewPlot.ui index 26b0ff597164b66bb1b0e39405c096f6601fd4ce..84ea0db811c1c1f967055160a752c262ccf5719b 100644 --- a/qt/widgets/legacyqwt/inc/MantidQtWidgets/LegacyQwt/PreviewPlot.ui +++ b/qt/widgets/legacyqwt/inc/MantidQtWidgets/LegacyQwt/PreviewPlot.ui @@ -6,8 +6,8 @@ <rect> <x>0</x> <y>0</y> - <width>300</width> - <height>259</height> + <width>96</width> + <height>36</height> </rect> </property> <property name="windowTitle"> @@ -27,6 +27,7 @@ <class>QwtPlot</class> <extends>QFrame</extends> <header>qwt_plot.h</header> + <container>1</container> </customwidget> </customwidgets> <resources/> diff --git a/qt/widgets/sliceviewer/CMakeLists.txt b/qt/widgets/sliceviewer/CMakeLists.txt index 1c28881fa3a90567328873d65c5adfe3a390161f..597f9066dab5d3cd0f27eddc7356c51fefa2657c 100644 --- a/qt/widgets/sliceviewer/CMakeLists.txt +++ b/qt/widgets/sliceviewer/CMakeLists.txt @@ -195,5 +195,5 @@ set ( TEST_PY_FILES ) if ( PYUNITTEST_FOUND ) - pyunittest_add_test (${CMAKE_CURRENT_SOURCE_DIR}/test python.test ${TEST_PY_FILES} ) + pyunittest_add_test (${CMAKE_CURRENT_SOURCE_DIR} python.test ${TEST_PY_FILES} ) endif () diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 8bcb5b217cd908bede84e175a511231f78d99e9d..1620a4dbd752066953d234418d34a9d9c69ce832 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -84,9 +84,7 @@ add_subdirectory(test/Muon) # python unit tests if (PYUNITTEST_FOUND) - pyunittest_add_test ( ${CMAKE_CURRENT_SOURCE_DIR}/test python.scripts ${TEST_PY_FILES} ) - # Trying to add tested modules to python system path - #set (ENV{PYTHONPATH} 
"ENV{PYTHONPATH} ${CMAKE_CURRENT_SOURCE_DIR}/Inelastic" ) + pyunittest_add_test ( ${CMAKE_CURRENT_SOURCE_DIR} python.scripts ${TEST_PY_FILES} ) endif () # Ensure we don't get stale pyc files around diff --git a/scripts/HFIR_4Circle_Reduction/CMakeLists.txt b/scripts/HFIR_4Circle_Reduction/CMakeLists.txt index f2ee63d7d3f806a6aac249d25c4d653aa7f8df05..9e79b806c6355dc13d2cc8dd88c6338dce3c58f5 100644 --- a/scripts/HFIR_4Circle_Reduction/CMakeLists.txt +++ b/scripts/HFIR_4Circle_Reduction/CMakeLists.txt @@ -11,6 +11,7 @@ set( UI_FILES UBSelectPeaksDialog.ui AddUBPeaksDialog.ui PeakIntegrationSpreadSheet.ui + httpserversetup.ui preprocess_window.ui peak_integration_info.ui ) diff --git a/scripts/HFIR_4Circle_Reduction/MainWindow.ui b/scripts/HFIR_4Circle_Reduction/MainWindow.ui index 24ae9fe69dd9ccb9ffd0849d3bc025974ac33037..55023b024559ee61a2876e43aa53699bb3712c4b 100644 --- a/scripts/HFIR_4Circle_Reduction/MainWindow.ui +++ b/scripts/HFIR_4Circle_Reduction/MainWindow.ui @@ -220,114 +220,17 @@ <string>Setup && Data Access</string> </attribute> <layout class="QGridLayout" name="gridLayout_5"> - <item row="3" column="0"> - <spacer name="verticalSpacer_4"> - <property name="orientation"> - <enum>Qt::Vertical</enum> - </property> - <property name="sizeType"> - <enum>QSizePolicy::Preferred</enum> - </property> - <property name="sizeHint" stdset="0"> - <size> - <width>20</width> - <height>40</height> - </size> - </property> - </spacer> - </item> - <item row="1" column="0"> - <widget class="QTextEdit" name="textEdit"> - <property name="enabled"> - <bool>true</bool> - </property> - <property name="sizePolicy"> - <sizepolicy hsizetype="Expanding" vsizetype="Preferred"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="readOnly"> - <bool>true</bool> - </property> - <property name="html"> - <string><!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> -<html><head><meta 
name="qrichtext" content="1" /><style type="text/css"> -p, li { white-space: pre-wrap; } -</style></head><body style=" font-family:'.SF NS Text'; font-size:13pt; font-weight:400; font-style:normal;"> -<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Ubuntu'; font-size:14pt; font-weight:600; font-style:italic;">DON'T PANIC! IT IS </span><span style=" font-family:'Ubuntu'; font-size:7pt; font-style:italic; vertical-align:sub;">(supposed to be)</span><span style=" font-family:'Ubuntu'; font-size:14pt; font-weight:600; font-style:italic;"> EASY TO USE!</span></p> -<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'Ubuntu'; font-size:10pt; font-weight:600;"><br /></p> -<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Ubuntu'; font-size:10pt; font-weight:600;">1. 
Configure the data reduction</span></p> -<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Ubuntu'; font-size:10pt; font-style:italic;">(a) Do not modify ServerURL unless necessary;</span></p> -<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Ubuntu'; font-size:10pt; font-style:italic;">(b) Click 'Apply' to check internet connection and directories;</span></p> -<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Ubuntu'; font-size:10pt; font-style:italic;">(c) Click 'Test' to test whether the server works.</span></p> -<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:'Ubuntu'; font-size:10pt; font-style:italic;"><br /></p> -<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Ubuntu'; font-size:10pt; font-weight:600;">2. 
Set experiment and calibration parameters</span></p> -<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Ubuntu'; font-size:10pt; font-style:italic;">(a) Set Experiment;</span></p> -<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Ubuntu'; font-size:10pt; font-style:italic;">(b) If sample distance is not 0.375 m, set it up;</span></p> -<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-family:'Ubuntu'; font-size:10pt; font-style:italic;">(c) If wave length is not standard, set it up;</span></p></body></html></string> - </property> - </widget> - </item> - <item row="6" column="0"> - <spacer name="verticalSpacer_2"> - <property name="orientation"> - <enum>Qt::Vertical</enum> - </property> - <property name="sizeHint" stdset="0"> - <size> - <width>20</width> - <height>40</height> - </size> - </property> - </spacer> - </item> - <item row="0" column="0"> - <widget class="QGroupBox" name="groupBox"> + <item row="5" column="0"> + <widget class="QGroupBox" name="groupBox_11"> <property name="title"> - <string>Configuration</string> + <string>Instrument Calibration</string> </property> - <layout class="QGridLayout" name="gridLayout_11"> - <item row="1" column="2"> - <widget class="QPushButton" name="pushButton_testURLs"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="minimumSize"> - <size> - <width>100</width> - <height>0</height> - </size> - </property> - <property name="maximumSize"> - <size> - <width>200</width> - <height>16777215</height> - </size> - </property> - <property name="text"> - <string>Test</string> - </property> - </widget> - </item> - <item 
row="2" column="2"> - <widget class="QPushButton" name="pushButton_browseLocalDataDir"> - <property name="text"> - <string>Browse</string> - </property> - </widget> - </item> - <item row="2" column="3"> - <spacer name="horizontalSpacer"> + <layout class="QGridLayout" name="gridLayout_16"> + <item row="0" column="11"> + <spacer name="horizontalSpacer_26"> <property name="orientation"> <enum>Qt::Horizontal</enum> </property> - <property name="sizeType"> - <enum>QSizePolicy::Preferred</enum> - </property> <property name="sizeHint" stdset="0"> <size> <width>40</width> @@ -336,308 +239,69 @@ p, li { white-space: pre-wrap; } </property> </spacer> </item> - <item row="2" column="1"> - <widget class="QLineEdit" name="lineEdit_localSpiceDir"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Expanding" vsizetype="Fixed"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="toolTip"> - <string><html><head/><body><p>Directory for local data storage</p></body></html></string> - </property> - </widget> - </item> - <item row="0" column="0"> - <widget class="QLabel" name="label_instrument"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="text"> - <string>Instrument</string> - </property> - </widget> - </item> - <item row="2" column="0"> - <widget class="QLabel" name="label_dir"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> + <item row="1" column="11"> + <spacer name="horizontalSpacer_16"> + <property name="orientation"> + <enum>Qt::Horizontal</enum> </property> - <property name="minimumSize"> + <property name="sizeHint" stdset="0"> <size> - <width>80</width> - <height>0</height> + <width>40</width> + <height>20</height> </size> </property> - <property name="toolTip"> 
- <string><html><head/><body><p>Directory for local data storage</p></body></html></string> - </property> - <property name="text"> - <string>Data Directory</string> - </property> - <property name="alignment"> - <set>Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter</set> - </property> - </widget> + </spacer> </item> <item row="0" column="1"> - <widget class="QComboBox" name="comboBox_instrument"> - <item> - <property name="text"> - <string>HB3A</string> - </property> - </item> - </widget> - </item> - <item row="0" column="2"> - <widget class="QPushButton" name="pushButton_useDefaultDir"> - <property name="font"> - <font> - <pointsize>10</pointsize> - </font> - </property> - <property name="toolTip"> - <string><html><head/><body><p>Use default set up</p><p><br/></p></body></html></string> - </property> - <property name="text"> - <string>Load Default</string> - </property> - </widget> - </item> - <item row="3" column="1"> - <widget class="QLineEdit" name="lineEdit_workDir"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Expanding" vsizetype="Fixed"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="toolTip"> - <string><html><head/><body><p>Directory to save outcome of the data reduction</p></body></html></string> - </property> - </widget> - </item> - <item row="1" column="1"> - <widget class="QLineEdit" name="lineEdit_url"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Expanding" vsizetype="Fixed"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - </widget> - </item> - <item row="1" column="0"> - <widget class="QLabel" name="label_url"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="minimumSize"> - <size> - <width>80</width> - <height>0</height> - </size> - </property> - <property name="toolTip"> - 
<string><html><head/><body><p>URL of the http server to download HB3A data</p></body></html></string> - </property> - <property name="text"> - <string>Server URL</string> - </property> - </widget> - </item> - <item row="3" column="2"> - <widget class="QPushButton" name="pushButton_browseWorkDir"> - <property name="text"> - <string>Browse</string> - </property> - </widget> - </item> - <item row="3" column="0"> - <widget class="QLabel" name="label_2"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="minimumSize"> - <size> - <width>140</width> - <height>1</height> - </size> - </property> - <property name="maximumSize"> - <size> - <width>140</width> - <height>16777215</height> - </size> - </property> - <property name="toolTip"> - <string><html><head/><body><p>Directory to save outcome of the data reduction</p></body></html></string> - </property> - <property name="text"> - <string>Working Direcotry</string> - </property> - </widget> - </item> - <item row="4" column="2"> - <widget class="QPushButton" name="pushButton_applySetup"> - <property name="font"> - <font> - <weight>75</weight> - <bold>true</bold> - </font> - </property> - <property name="text"> - <string>Apply</string> - </property> - </widget> - </item> - </layout> - </widget> - </item> - <item row="4" column="0"> - <widget class="QGroupBox" name="groupBox_2"> - <property name="title"> - <string>Data Download</string> - </property> - <layout class="QGridLayout" name="gridLayout_12"> - <item row="0" column="2"> - <widget class="QPushButton" name="pushButton_browseLocalCache"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Fixed" vsizetype="Fixed"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="minimumSize"> - <size> - <width>100</width> - <height>0</height> - </size> - </property> - <property 
name="maximumSize"> - <size> - <width>100</width> - <height>16777215</height> - </size> - </property> - <property name="text"> - <string>Browse</string> - </property> - </widget> - </item> - <item row="2" column="0"> - <widget class="QLabel" name="label"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="minimumSize"> - <size> - <width>80</width> - <height>0</height> - </size> - </property> - <property name="text"> - <string>Scans List</string> + <widget class="QLineEdit" name="lineEdit_defaultSampleDetDistance"> + <property name="enabled"> + <bool>false</bool> </property> - </widget> - </item> - <item row="1" column="1"> - <widget class="QComboBox" name="comboBox_mode"> <property name="sizePolicy"> - <sizepolicy hsizetype="Expanding" vsizetype="Fixed"> + <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> <horstretch>0</horstretch> <verstretch>0</verstretch> </sizepolicy> </property> - <property name="toolTip"> - <string><html><head/><body><p>Mode &quot;download&quot;: download data to local disk;</p><p>Mode &quot;http server only&quot;: download data to cache, process and delete cached data upon returning</p></body></html></string> + <property name="text"> + <string>0.3750</string> </property> - <item> - <property name="text"> - <string>Download Complete Experiment</string> - </property> - </item> - <item> - <property name="text"> - <string>Download Selected Scans</string> - </property> - </item> </widget> </item> - <item row="0" column="1"> - <widget class="QLineEdit" name="lineEdit_localSrcDir"> - <property name="toolTip"> - <string><html><head/><body><p>Cache on local disk. The dowloaded data will be saved to here. 
</p></body></html></string> + <item row="1" column="0"> + <widget class="QLabel" name="label_38"> + <property name="text"> + <string>Calibrated Detector Sample Distance</string> </property> </widget> </item> - <item row="1" column="0"> - <widget class="QLabel" name="label_datamode"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="minimumSize"> - <size> - <width>80</width> - <height>0</height> - </size> + <item row="1" column="2"> + <widget class="QPushButton" name="pushButton_applyCalibratedSampleDistance"> + <property name="enabled"> + <bool>true</bool> </property> <property name="text"> - <string>Download Mode</string> + <string>Apply</string> </property> </widget> </item> - <item row="0" column="0"> - <widget class="QLabel" name="label_4"> + <item row="1" column="1"> + <widget class="QLineEdit" name="lineEdit_userDetSampleDistance"> <property name="sizePolicy"> - <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> + <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> <horstretch>0</horstretch> <verstretch>0</verstretch> </sizepolicy> </property> - <property name="minimumSize"> - <size> - <width>140</width> - <height>0</height> - </size> - </property> - <property name="maximumSize"> - <size> - <width>140</width> - <height>16777215</height> - </size> - </property> - <property name="text"> - <string>Destination</string> - </property> </widget> </item> - <item row="0" column="6"> - <spacer name="horizontalSpacer_2"> + <item row="0" column="3"> + <spacer name="horizontalSpacer_29"> <property name="orientation"> <enum>Qt::Horizontal</enum> </property> <property name="sizeType"> - <enum>QSizePolicy::Minimum</enum> + <enum>QSizePolicy::Preferred</enum> </property> <property name="sizeHint" stdset="0"> <size> @@ -647,59 +311,101 @@ p, li { white-space: pre-wrap; } </property> </spacer> </item> - <item row="1" column="2"> - 
<widget class="QPushButton" name="pushButton_downloadExpData"> - <property name="font"> - <font> - <weight>75</weight> - <bold>true</bold> - </font> + <item row="0" column="0"> + <widget class="QLabel" name="label_33"> + <property name="text"> + <string>Default Detector Sample Distance</string> </property> - <property name="toolTip"> - <string><html><head/><body><p><span style=" font-weight:400;">Download scans specified by 'Scans List'; </span></p><p><span style=" font-weight:400;">If 'Scans List' is empty, then the complete experiment data will be downloaded</span></p></body></html></string> + </widget> + </item> + <item row="0" column="8"> + <widget class="QLabel" name="label_36"> + <property name="text"> + <string>Wavelength</string> </property> + </widget> + </item> + <item row="1" column="4"> + <widget class="QLabel" name="label_10"> <property name="text"> - <string>Download</string> + <string>Calibrated Detector Center</string> </property> </widget> </item> - <item row="2" column="1"> - <widget class="QLineEdit" name="lineEdit_downloadScans"/> + <item row="1" column="5"> + <layout class="QHBoxLayout" name="horizontalLayout_17"> + <item> + <widget class="QLineEdit" name="lineEdit_detCenterPixHorizontal"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="maximumSize"> + <size> + <width>100</width> + <height>16777215</height> + </size> + </property> + <property name="toolTip"> + <string><html><head/><body><p><span style=" font-weight:600;">ROW NUMBER</span> of User-specified detector center</p></body></html></string> + </property> + </widget> + </item> + <item> + <widget class="QLineEdit" name="lineEdit_detCenterPixVertical"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property 
name="maximumSize"> + <size> + <width>100</width> + <height>16777215</height> + </size> + </property> + <property name="toolTip"> + <string><html><head/><body><p><span style=" font-weight:600;">COLUMN NUMBER</span> of User-specified detector center</p></body></html></string> + </property> + </widget> + </item> + </layout> </item> - <item row="2" column="2"> - <widget class="QPushButton" name="pushButton_ListScans"> + <item row="1" column="6"> + <widget class="QPushButton" name="pushButton_applyUserDetCenter"> + <property name="enabled"> + <bool>true</bool> + </property> <property name="text"> - <string>List Scans</string> + <string>Apply</string> </property> </widget> </item> - </layout> - </widget> - </item> - <item row="5" column="0"> - <widget class="QGroupBox" name="groupBox_11"> - <property name="title"> - <string>Instrument Calibration</string> - </property> - <layout class="QGridLayout" name="gridLayout_16"> - <item row="0" column="11"> - <spacer name="horizontalSpacer_26"> - <property name="orientation"> - <enum>Qt::Horizontal</enum> + <item row="0" column="4"> + <widget class="QLabel" name="label_19"> + <property name="text"> + <string>Default Detector Center</string> </property> - <property name="sizeHint" stdset="0"> - <size> - <width>40</width> - <height>20</height> - </size> + </widget> + </item> + <item row="0" column="10"> + <widget class="QPushButton" name="pushButton_applyUserWavelength"> + <property name="text"> + <string>Apply</string> </property> - </spacer> + </widget> </item> - <item row="1" column="11"> - <spacer name="horizontalSpacer_16"> + <item row="0" column="7"> + <spacer name="horizontalSpacer_34"> <property name="orientation"> <enum>Qt::Horizontal</enum> </property> + <property name="sizeType"> + <enum>QSizePolicy::Preferred</enum> + </property> <property name="sizeHint" stdset="0"> <size> <width>40</width> @@ -708,11 +414,8 @@ p, li { white-space: pre-wrap; } </property> </spacer> </item> - <item row="0" column="1"> - <widget 
class="QLineEdit" name="lineEdit_defaultSampleDetDistance"> - <property name="enabled"> - <bool>false</bool> - </property> + <item row="0" column="9"> + <widget class="QLineEdit" name="lineEdit_userWaveLength"> <property name="sizePolicy"> <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> <horstretch>0</horstretch> @@ -720,39 +423,79 @@ p, li { white-space: pre-wrap; } </sizepolicy> </property> <property name="text"> - <string>0.3750</string> + <string/> </property> </widget> </item> - <item row="1" column="0"> - <widget class="QLabel" name="label_38"> + <item row="0" column="5"> + <widget class="QLineEdit" name="lineEdit_defaultDetCenter"> + <property name="enabled"> + <bool>false</bool> + </property> <property name="text"> - <string>Calibrated Detector Sample Distance</string> + <string>115, 128</string> </property> </widget> </item> - <item row="1" column="2"> - <widget class="QPushButton" name="pushButton_applyCalibratedSampleDistance"> - <property name="enabled"> - <bool>true</bool> + <item row="1" column="8"> + <widget class="QLabel" name="label_70"> + <property name="text"> + <string>Detector Size</string> </property> + </widget> + </item> + <item row="1" column="10"> + <widget class="QPushButton" name="pushButton_applyDetectorSize"> <property name="text"> <string>Apply</string> </property> </widget> </item> + <item row="1" column="9"> + <widget class="QComboBox" name="comboBox_detectorSize"> + <item> + <property name="text"> + <string>256 x 256</string> + </property> + </item> + <item> + <property name="text"> + <string>512 x 512</string> + </property> + </item> + </widget> + </item> + </layout> + </widget> + </item> + <item row="2" column="0"> + <widget class="QGroupBox" name="groupBox"> + <property name="title"> + <string>Directories Setup</string> + </property> + <layout class="QGridLayout" name="gridLayout_11"> + <item row="1" column="2"> + <widget class="QPushButton" name="pushButton_browseLocalDataDir"> + <property name="text"> + 
<string>Browse</string> + </property> + </widget> + </item> <item row="1" column="1"> - <widget class="QLineEdit" name="lineEdit_userDetSampleDistance"> + <widget class="QLineEdit" name="lineEdit_localSpiceDir"> <property name="sizePolicy"> - <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> + <sizepolicy hsizetype="Expanding" vsizetype="Fixed"> <horstretch>0</horstretch> <verstretch>0</verstretch> </sizepolicy> </property> + <property name="toolTip"> + <string><html><head/><body><p>Directory for local data storage</p></body></html></string> + </property> </widget> </item> - <item row="0" column="3"> - <spacer name="horizontalSpacer_29"> + <item row="1" column="3"> + <spacer name="horizontalSpacer"> <property name="orientation"> <enum>Qt::Horizontal</enum> </property> @@ -767,95 +510,81 @@ p, li { white-space: pre-wrap; } </property> </spacer> </item> - <item row="0" column="0"> - <widget class="QLabel" name="label_33"> - <property name="text"> - <string>Default Detector Sample Distance</string> + <item row="1" column="0"> + <widget class="QLabel" name="label_dir"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>80</width> + <height>0</height> + </size> + </property> + <property name="toolTip"> + <string><html><head/><body><p>Directory for local data storage</p></body></html></string> </property> - </widget> - </item> - <item row="0" column="8"> - <widget class="QLabel" name="label_36"> <property name="text"> - <string>Wavelength</string> + <string>Data Directory</string> + </property> + <property name="alignment"> + <set>Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter</set> </property> </widget> </item> - <item row="1" column="4"> - <widget class="QLabel" name="label_10"> + <item row="1" column="6"> + <widget class="QPushButton" name="pushButton_browseWorkDir"> <property name="text"> - 
<string>Calibrated Detector Center</string> + <string>Browse</string> </property> </widget> </item> <item row="1" column="5"> - <layout class="QHBoxLayout" name="horizontalLayout_17"> - <item> - <widget class="QLineEdit" name="lineEdit_detCenterPixHorizontal"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="maximumSize"> - <size> - <width>100</width> - <height>16777215</height> - </size> - </property> - <property name="toolTip"> - <string><html><head/><body><p><span style=" font-weight:600;">ROW NUMBER</span> of User-specified detector center</p></body></html></string> - </property> - </widget> - </item> - <item> - <widget class="QLineEdit" name="lineEdit_detCenterPixVertical"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> - <property name="maximumSize"> - <size> - <width>100</width> - <height>16777215</height> - </size> - </property> - <property name="toolTip"> - <string><html><head/><body><p><span style=" font-weight:600;">COLUMN NUMBER</span> of User-specified detector center</p></body></html></string> - </property> - </widget> - </item> - </layout> - </item> - <item row="1" column="6"> - <widget class="QPushButton" name="pushButton_applyUserDetCenter"> - <property name="enabled"> - <bool>true</bool> + <widget class="QLineEdit" name="lineEdit_workDir"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Expanding" vsizetype="Fixed"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> </property> - <property name="text"> - <string>Apply</string> + <property name="toolTip"> + <string><html><head/><body><p>Directory to save outcome of the data reduction</p></body></html></string> </property> </widget> </item> - <item row="0" column="4"> - <widget class="QLabel" 
name="label_19"> - <property name="text"> - <string>Default Detector Center</string> + <item row="1" column="4"> + <widget class="QLabel" name="label_2"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>140</width> + <height>1</height> + </size> + </property> + <property name="maximumSize"> + <size> + <width>140</width> + <height>16777215</height> + </size> + </property> + <property name="toolTip"> + <string><html><head/><body><p>Directory to save outcome of the data reduction</p></body></html></string> </property> - </widget> - </item> - <item row="0" column="10"> - <widget class="QPushButton" name="pushButton_applyUserWavelength"> <property name="text"> - <string>Apply</string> + <string>Working Direcotry</string> </property> </widget> </item> - <item row="0" column="7"> - <spacer name="horizontalSpacer_34"> + <item row="2" column="7"> + <spacer name="horizontalSpacer_2"> <property name="orientation"> <enum>Qt::Horizontal</enum> </property> @@ -870,60 +599,127 @@ p, li { white-space: pre-wrap; } </property> </spacer> </item> - <item row="0" column="9"> - <widget class="QLineEdit" name="lineEdit_userWaveLength"> - <property name="sizePolicy"> - <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> - <horstretch>0</horstretch> - <verstretch>0</verstretch> - </sizepolicy> - </property> + <item row="2" column="0"> + <widget class="QLabel" name="label"> <property name="text"> - <string/> + <string>Pre-processed Data Directory</string> </property> </widget> </item> - <item row="0" column="5"> - <widget class="QLineEdit" name="lineEdit_defaultDetCenter"> - <property name="enabled"> - <bool>false</bool> + <item row="2" column="8"> + <widget class="QPushButton" name="pushButton_applySetup"> + <property name="font"> + <font> + <weight>75</weight> + <bold>true</bold> + </font> + </property> + <property 
name="toolTip"> + <string><html><head/><body><p><span style=" font-weight:400;">Check directories and apply!</span></p></body></html></string> </property> <property name="text"> - <string>115, 128</string> + <string>Apply</string> </property> </widget> </item> - <item row="1" column="8"> - <widget class="QLabel" name="label_70"> + <item row="2" column="1"> + <widget class="QLineEdit" name="lineEdit_preprocessedDir"/> + </item> + <item row="2" column="2"> + <widget class="QPushButton" name="pushButton_browsePreprocessed"> <property name="text"> - <string>Detector Size</string> + <string>Browse</string> </property> </widget> </item> - <item row="1" column="10"> - <widget class="QPushButton" name="pushButton_applyDetectorSize"> + <item row="3" column="1"> + <widget class="QCheckBox" name="checkBox_searchPreprocessedFirst"> <property name="text"> - <string>Apply</string> + <string>Search Pre-processed Scan First</string> </property> </widget> </item> - <item row="1" column="9"> - <widget class="QComboBox" name="comboBox_detectorSize"> - <item> - <property name="text"> - <string>256 x 256</string> - </property> - </item> - <item> - <property name="text"> - <string>512 x 512</string> - </property> - </item> - </widget> - </item> </layout> </widget> </item> + <item row="6" column="0"> + <spacer name="verticalSpacer_2"> + <property name="orientation"> + <enum>Qt::Vertical</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>20</width> + <height>40</height> + </size> + </property> + </spacer> + </item> + <item row="4" column="0"> + <spacer name="verticalSpacer_4"> + <property name="orientation"> + <enum>Qt::Vertical</enum> + </property> + <property name="sizeType"> + <enum>QSizePolicy::Preferred</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>20</width> + <height>40</height> + </size> + </property> + </spacer> + </item> + <item row="0" column="0"> + <widget class="QTextEdit" name="textEdit"> + <property 
name="enabled"> + <bool>true</bool> + </property> + <property name="sizePolicy"> + <sizepolicy hsizetype="Expanding" vsizetype="Preferred"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="readOnly"> + <bool>true</bool> + </property> + <property name="html"> + <string><!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> +<html><head><meta name="qrichtext" content="1" /><style type="text/css"> +p, li { white-space: pre-wrap; } +</style></head><body style=" font-family:'Ubuntu'; font-size:11pt; font-weight:400; font-style:normal;"> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:14pt; font-weight:600; font-style:italic;">DON'T PANIC! IT IS </span><span style=" font-size:7pt; font-style:italic; vertical-align:sub;">(supposed to be)</span><span style=" font-size:14pt; font-weight:600; font-style:italic;"> EASY TO USE!</span></p> +<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt; font-weight:600;"><br /></p> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt; font-weight:600;">1. 
Configure the data reduction</span></p> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt; font-style:italic;">(a) Set </span><span style=" font-size:10pt; font-weight:600; font-style:italic;">Experiment </span><span style=" font-size:10pt; font-style:italic;">;</span></p> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt; font-style:italic;">(b) Set up Data Directory if it is red;</span></p> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt; font-style:italic;">(c) Set up Working Directory if it is not what you want.</span></p> +<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:10pt; font-style:italic;"><br /></p> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt; font-weight:600;">2. 
Set experiment and calibration parameters</span></p> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt; font-style:italic;">(a) If sample distance is not 0.375 m, set it up;</span></p> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt; font-style:italic;">(b) If wave length is not standard, set it up;</span></p> +<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt; font-style:italic;">(c) Set up calibrated detector center</span></p></body></html></string> + </property> + </widget> + </item> + <item row="1" column="0"> + <spacer name="verticalSpacer_32"> + <property name="orientation"> + <enum>Qt::Vertical</enum> + </property> + <property name="sizeType"> + <enum>QSizePolicy::Preferred</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>20</width> + <height>40</height> + </size> + </property> + </spacer> + </item> </layout> </widget> <widget class="QWidget" name="tab_survey"> @@ -1178,6 +974,13 @@ p, li { white-space: pre-wrap; } </property> </widget> </item> + <item> + <widget class="QCheckBox" name="checkBox_addPeak2UBMask"> + <property name="text"> + <string>apply roi</string> + </property> + </widget> + </item> <item> <widget class="QComboBox" name="comboBox_maskNamesSurvey"> <property name="font"> @@ -5444,7 +5247,7 @@ p, li { white-space: pre-wrap; } <string>Advanced Setup</string> </attribute> <layout class="QGridLayout" name="gridLayout_27"> - <item row="0" column="2"> + <item row="1" column="2"> <spacer name="horizontalSpacer_25"> <property name="orientation"> <enum>Qt::Horizontal</enum> @@ -5460,7 +5263,7 @@ p, li { white-space: pre-wrap; } </property> </spacer> </item> - <item row="0" column="0"> + <item row="1" column="0"> <widget 
class="QGroupBox" name="groupBox_16"> <property name="title"> <string>Last 3 Projects</string> @@ -5586,7 +5389,7 @@ p, li { white-space: pre-wrap; } </layout> </widget> </item> - <item row="2" column="0"> + <item row="3" column="0"> <spacer name="verticalSpacer_25"> <property name="orientation"> <enum>Qt::Vertical</enum> @@ -5602,7 +5405,7 @@ p, li { white-space: pre-wrap; } </property> </spacer> </item> - <item row="1" column="2"> + <item row="2" column="2"> <spacer name="horizontalSpacer_35"> <property name="orientation"> <enum>Qt::Horizontal</enum> @@ -5618,7 +5421,7 @@ p, li { white-space: pre-wrap; } </property> </spacer> </item> - <item row="1" column="0"> + <item row="2" column="0"> <widget class="QGroupBox" name="groupBox_23"> <property name="title"> <string>Constants</string> @@ -5729,6 +5532,24 @@ p, li { white-space: pre-wrap; } </layout> </widget> </item> + <item row="0" column="0"> + <widget class="QGroupBox" name="groupBox_2"> + <property name="title"> + <string>Instrument</string> + </property> + <layout class="QGridLayout" name="gridLayout_2"> + <item row="0" column="0"> + <widget class="QComboBox" name="comboBox_instrument"> + <item> + <property name="text"> + <string>HB3A</string> + </property> + </item> + </widget> + </item> + </layout> + </widget> + </item> </layout> </widget> </widget> @@ -5745,7 +5566,7 @@ p, li { white-space: pre-wrap; } <x>0</x> <y>0</y> <width>1568</width> - <height>22</height> + <height>25</height> </rect> </property> <widget class="QMenu" name="menuFile"> @@ -5781,6 +5602,7 @@ p, li { white-space: pre-wrap; } <string>Admin Tools</string> </property> <addaction name="actionPre_Processing"/> + <addaction name="actionData_Downloading"/> </widget> <addaction name="menuFile"/> <addaction name="menuTools"/> @@ -5900,6 +5722,11 @@ p, li { white-space: pre-wrap; } <string>Pre-Processing</string> </property> </action> + <action name="actionData_Downloading"> + <property name="text"> + <string>Download Data</string> + </property> 
+ </action> </widget> <customwidgets> <customwidget> diff --git a/scripts/HFIR_4Circle_Reduction/PreprocessWindow.py b/scripts/HFIR_4Circle_Reduction/PreprocessWindow.py index ea5da908f99f005ce9e1d82309558b770883fd06..d54453a8fed5989e9c0a5584f3b35241373e2590 100644 --- a/scripts/HFIR_4Circle_Reduction/PreprocessWindow.py +++ b/scripts/HFIR_4Circle_Reduction/PreprocessWindow.py @@ -1,8 +1,11 @@ import os +import time +import csv from PyQt4 import QtGui, QtCore import ui_preprocess_window import reduce4circleControl import guiutility as gui_util +import HFIR_4Circle_Reduction.fourcircle_utility as fourcircle_utility import NTableWidget @@ -17,6 +20,21 @@ class ScanPreProcessWindow(QtGui.QMainWindow): """ super(ScanPreProcessWindow, self).__init__(parent) + # class variables + self._myController = None + self._myMergePeaksThread = None + self._rowScanDict = dict() + + # mutex and data structure that can be in contention + self._recordLogMutex = False + self._scansToProcess = set() + self._mdFileDict = dict() + self._scanNumbersProcessed = set() + + # current experiment number in processing + self._currExpNumber = None + self._outputDir = None + # define UI self.ui = ui_preprocess_window.Ui_PreprocessWindow() self.ui.setupUi(self) @@ -49,11 +67,6 @@ class ScanPreProcessWindow(QtGui.QMainWindow): self.connect(self.ui.actionExit, QtCore.SIGNAL('triggered()'), self.do_quit) - # class variables - self._myController = None - self._myMergePeaksThread = None - self._rowScanDict = dict() - return @property @@ -145,7 +158,7 @@ class ScanPreProcessWindow(QtGui.QMainWindow): self.set_calibration_to_reduction_controller(exp_number) # set up GUI - self._rowScanDict = self.ui.tableView_scanProcessState.add_new_scans(scan_list, append=True) + self._rowScanDict = self.ui.tableView_scanProcessState.add_new_scans(scan_list) # form the output files output_dir = str(self.ui.lineEdit_outputDir.text()) @@ -153,15 +166,21 @@ class ScanPreProcessWindow(QtGui.QMainWindow): # create output 
directory and change to all accessible os.mkdir(output_dir) os.chmod(output_dir, 0o777) + self._outputDir = output_dir file_list = list() for scan in scan_list: - md_file_name = os.path.join(output_dir, 'Exp{0}_Scan{1}_MD.nxs'.format(exp_number, scan)) + # md_file_name = os.path.join(output_dir, 'Exp{0}_Scan{1}_MD.nxs'.format(exp_number, scan)) + md_file_name = fourcircle_utility.pre_processed_file_name(exp_number, scan, output_dir) file_list.append(md_file_name) # launch the multiple threading to scans self._myMergePeaksThread = multi_threads_helpers.MergePeaksThread(self, exp_number, scan_list, file_list) + self._scansToProcess = set(scan_list) + self._scanNumbersProcessed = set() + + self._currExpNumber = exp_number self._myMergePeaksThread.start() return @@ -336,7 +355,7 @@ class ScanPreProcessWindow(QtGui.QMainWindow): self.ui.lineEdit_outputDir.setText(default_output_dir) except OSError: self.ui.lineEdit_outputDir.setText('/tmp') - default_output_dir = '/tmp' + # default_output_dir = '/tmp' # END-IF # END-IF @@ -362,6 +381,33 @@ class ScanPreProcessWindow(QtGui.QMainWindow): :param file_name: :return: """ + # file is written, then check whether it is time to write a record file + counter = 0 + while self._recordLogMutex: + # waiting for the mutex to be released + time.sleep(0.1) + counter += 1 + if counter > 600: # 60 seconds... too long + raise RuntimeError('It is too long to wait for mutex released. 
There must be a bug!') + # END-WHILE + + # update processed scan numbers + self._recordLogMutex = True + self._scanNumbersProcessed.add(scan_number) + self._mdFileDict[scan_number] = file_name + self._recordLogMutex = False + + # check whether it is time to write all the scans to file + print '[DB...BAT] Scans to process: {0} vs \n\tScans processed: {1}' \ + ''.format(self._scansToProcess, self._scanNumbersProcessed) + + if len(self._scansToProcess) == len(self._scanNumbersProcessed): + self.update_record_file(self._currExpNumber, check_duplicates=False, scan_list=self._scanNumbersProcessed) + if self._scansToProcess != self._scanNumbersProcessed: + raise RuntimeWarning('Scans to process {0} is not same as scans processed {1}.' + ''.format(self._scansToProcess, self._scanNumbersProcessed)) + # END-IF + row_number = self._rowScanDict[scan_number] self.ui.tableView_scanProcessState.set_file_name(row_number, file_name) self.ui.tableView_scanProcessState.resizeColumnsToContents() @@ -380,6 +426,45 @@ class ScanPreProcessWindow(QtGui.QMainWindow): return + def update_record_file(self, exp_number, check_duplicates, scan_list): + """ + update the record file + it is an option to append file or check and remove duplication. + duplication can be removed in the record file loading method by checking the time stamp + :param check_duplicates: + :return: + """ + # check inputs + assert len(self._scanNumbersProcessed) > 0, 'Processed scan number set cannot be empty!' 
+ + # get calibration information + det_sample_distance = self._myController.get_calibrated_det_sample_distance(exp_number=exp_number) + det_center_x, det_center_y = self._myController.get_calibrated_det_center(exp_number) + user_wave_length = self._myController.get_calibrated_wave_length(exp_number) + + record_file_name = fourcircle_utility.pre_processed_record_file(exp_number, self._outputDir) + if os.path.exists(record_file_name): + write_header = False + else: + write_header = True + + with open(record_file_name, 'a') as csvfile: + fieldnames = fourcircle_utility.pre_processed_record_header() + writer = csv.DictWriter(csvfile, fieldnames=fieldnames) + + # write header for the first time + if write_header: + writer.writeheader() + + for scan_number in scan_list: + record = fourcircle_utility.pre_processed_record_make(scan_number, self._mdFileDict[scan_number], + det_sample_distance, + det_center_x, det_center_y, user_wave_length) + writer.writerow(record) + # END-FOR + + # END-WITH + class ScanPreProcessStatusTable(NTableWidget.NTableWidget): """ diff --git a/scripts/HFIR_4Circle_Reduction/downloaddialog.py b/scripts/HFIR_4Circle_Reduction/downloaddialog.py new file mode 100644 index 0000000000000000000000000000000000000000..469703f43df81579d5af04bc8dcf3228ae6c956c --- /dev/null +++ b/scripts/HFIR_4Circle_Reduction/downloaddialog.py @@ -0,0 +1,229 @@ +########## +# Dialog to set up HTTP data downloading server and download HB3A data to local +########## +import os +from PyQt4 import QtCore +from PyQt4 import QtGui +import HFIR_4Circle_Reduction.fourcircle_utility as hb3a_util +from HFIR_4Circle_Reduction import ui_httpserversetup as ui_http + + +try: + _fromUtf8 = QtCore.QString.fromUtf8 +except AttributeError: + def _fromUtf8(s): + return s + + +class DataDownloadDialog(QtGui.QDialog): + """ dialog for set up HTTP server and download files to local computer + This feature will be valid until SNS disables the HTTP server for HFIR data + """ + def __init__(self, 
parent): + """ + initialization + :param parent: + """ + super(DataDownloadDialog, self).__init__(parent) + + # set up UI + self.ui = ui_http.Ui_Dialog() + self.ui.setupUi(self) + + # initialize widgets + self._init_widgets() + + # define event handing + self.connect(self.ui.pushButton_testURLs, QtCore.SIGNAL('clicked()'), + self.do_test_url) + + self.connect(self.ui.pushButton_downloadExpData, QtCore.SIGNAL('clicked()'), + self.do_download_spice_data) + + self.connect(self.ui.pushButton_ListScans, QtCore.SIGNAL('clicked()'), + self.do_list_scans) + + self.connect(self.ui.comboBox_mode, QtCore.SIGNAL('currentIndexChanged(int)'), + self.do_change_data_access_mode) + + # self.connect(self.ui.pushButton_useDefaultDir, QtCore.SIGNAL('clicked()'), + # self.do_setup_dir_default) + self.connect(self.ui.pushButton_browseLocalCache, QtCore.SIGNAL('clicked()'), + self.do_browse_local_cache_dir) + + # Set the URL red as it is better not check at this stage. Leave it to user + self.ui.lineEdit_url.setStyleSheet("color: black;") + + # define class variable + self._homeSrcDir = os.getcwd() + try: + self._myControl = None + self._myControl = parent.controller + except AttributeError as att_err: + print att_err + + # experiment number + self._expNumber = None + + return + + def _init_widgets(self): + """ + initialize widgets + :return: + """ + self.ui.lineEdit_url.setText('http://neutron.ornl.gov/user_data/hb3a/') + + return + + def do_browse_local_cache_dir(self): + """ Browse local cache directory + :return: + """ + local_cache_dir = str(QtGui.QFileDialog.getExistingDirectory(self, + 'Get Local Cache Directory', + self._homeSrcDir)) + + # Set local directory to control + status, error_message = self._myControl.set_local_data_dir(local_cache_dir) + if status is False: + self.pop_one_button_dialog(error_message) + return + + # Synchronize to local data/spice directory and local cache directory + # if str(self.ui.lineEdit_localSpiceDir.text()) != '': + # prev_dir = 
str(self.ui.lineEdit_localSrcDir.text()) + # self.pop_one_button_dialog('Local data directory was set up as %s' % + # prev_dir) + self.ui.lineEdit_localSrcDir.setText(local_cache_dir) + # self.ui.lineEdit_localSpiceDir.setText(local_cache_dir) + + return + + def do_change_data_access_mode(self): + """ Change data access mode between downloading from server and local + Event handling methods + :return: + """ + # TODO/FIXME/NOW - Find out whether these widgets are used in the dialog + # new_mode = str(self.ui.comboBox_mode.currentText()) + # self._dataAccessMode = new_mode + + # if new_mode.startswith('Local') is True: + # self.ui.lineEdit_localSpiceDir.setEnabled(True) + # self.ui.pushButton_browseLocalDataDir.setEnabled(True) + # self.ui.lineEdit_url.setEnabled(False) + # self.ui.lineEdit_localSrcDir.setEnabled(False) + # self.ui.pushButton_browseLocalCache.setEnabled(False) + # self._allowDownload = False + # else: + # self.ui.lineEdit_localSpiceDir.setEnabled(False) + # self.ui.pushButton_browseLocalDataDir.setEnabled(False) + # self.ui.lineEdit_url.setEnabled(True) + # self.ui.lineEdit_localSrcDir.setEnabled(True) + # self.ui.pushButton_browseLocalCache.setEnabled(True) + # self._allowDownload = True + + return + + def do_download_spice_data(self): + """ Download SPICE data + :return: + """ + # get experiment number + exp_no = self._expNumber + assert isinstance(exp_no, int), 'Experiment number {0} must be an integer but not a {1}.' 
\ + ''.format(exp_no, type(exp_no)) + + # Check scans to download + scan_list_str = str(self.ui.lineEdit_downloadScans.text()) + if len(scan_list_str) > 0: + # user specifies scans to download + valid, scan_list = hb3a_util.parse_int_array(scan_list_str) + if valid is False: + error_message = scan_list + self.pop_one_button_dialog(error_message) + else: + # Get all scans + server_url = str(self.ui.lineEdit_url.text()) + scan_list = hb3a_util.get_scans_list(server_url, exp_no, return_list=True) + self.pop_one_button_dialog('Going to download scans %s.' % str(scan_list)) + + # Check location + destination_dir = str(self.ui.lineEdit_localSrcDir.text()) + status, error_message = self._myControl.set_local_data_dir(destination_dir) + if status is False: + self.pop_one_button_dialog(error_message) + else: + self.pop_one_button_dialog('Spice files will be downloaded to %s.' % destination_dir) + + # Set up myControl for downloading data + self._myControl.set_exp_number(exp_no) + + server_url = str(self.ui.lineEdit_url.text()) + status, error_message = self._myControl.set_server_url(server_url) + if status is False: + self.pop_one_button_dialog(error_message) + return + + # Download + self._myControl.download_data_set(scan_list) + + return + + def do_list_scans(self): + """ List all scans available and show the information in a pop-up dialog + :return: + """ + # Experiment number + exp_no = int(self.ui.lineEdit_exp.text()) + + access_mode = str(self.ui.comboBox_mode.currentText()) + if access_mode == 'Local': + spice_dir = str(self.ui.lineEdit_localSpiceDir.text()) + message = hb3a_util.get_scans_list_local_disk(spice_dir, exp_no) + else: + url = str(self.ui.lineEdit_url.text()) + message = hb3a_util.get_scans_list(url, exp_no) + + self.pop_one_button_dialog(message) + + return + + def do_test_url(self): + """ Test whether the root URL provided specified is good + """ + url = str(self.ui.lineEdit_url.text()) + + url_is_good, err_msg = hb3a_util.check_url(url) + if 
url_is_good is True: + self.pop_one_button_dialog("URL %s is valid." % url) + self.ui.lineEdit_url.setStyleSheet("color: green;") + else: + self.pop_one_button_dialog(err_msg) + self.ui.lineEdit_url.setStyleSheet("color: read;") + + return url_is_good + + def pop_one_button_dialog(self, message): + """ Pop up a one-button dialog + :param message: + :return: + """ + assert isinstance(message, str), 'Input message %s must a string but not %s.' \ + '' % (str(message), type(message)) + QtGui.QMessageBox.information(self, '4-circle Data Reduction', message) + + return + + def set_experiment_number(self, exp_number): + """set the experiment number + :param exp_number: + :return: + """ + assert isinstance(exp_number, int), 'Experiment number {0} to set to download dialog must be an integer but ' \ + 'not a {1}.'.format(exp_number, type(exp_number)) + + self._expNumber = exp_number + + return diff --git a/scripts/HFIR_4Circle_Reduction/fourcircle_utility.py b/scripts/HFIR_4Circle_Reduction/fourcircle_utility.py index 1736ff749197fec8b642a69c6c926b0b7c3b739d..26f056438727a4af4c79a4e13578cc62a8212b4d 100644 --- a/scripts/HFIR_4Circle_Reduction/fourcircle_utility.py +++ b/scripts/HFIR_4Circle_Reduction/fourcircle_utility.py @@ -1,8 +1,10 @@ #pylint: disable=W0633,R0913,too-many-branches from __future__ import (absolute_import, division, print_function) from six.moves import range +import csv import os -try: # python3 +try: + # python3 from urllib.request import urlopen from urllib.error import URLError except ImportError: @@ -557,7 +559,8 @@ def get_merged_md_name(instrument_name, exp_no, scan_no, pt_list): # check assert isinstance(instrument_name, str) assert isinstance(exp_no, int) and isinstance(scan_no, int) - assert isinstance(pt_list, list) + assert isinstance(pt_list, list), 'Pt list {0} must be a list but not a {1}' \ + ''.format(pt_list, type(pt_list)) if len(pt_list) == 0: raise RuntimeError('Pt number list {0} cannot be empty.', pt_list) @@ -579,10 +582,14 @@ def 
get_merged_hkl_md_name(instrument_name, exp_no, scan_no, pt_list): :return: """ # check - assert isinstance(instrument_name, str), 'blabla' - assert isinstance(exp_no, int) and isinstance(scan_no, int), 'blabla' - assert isinstance(pt_list, list), 'blabla' - assert len(pt_list) > 0, 'blabla' + assert isinstance(instrument_name, str), 'Instrument name {0} shall be a string but not a {1}' \ + ''.format(instrument_name, type(instrument_name)) + assert isinstance(exp_no, int) and isinstance(scan_no, int),\ + 'Both experiment number {0} ({1}) and scan number {2} ({3}) shall be integer.' \ + ''.format(exp_no, type(exp_no), scan_no, type(scan_no)) + assert isinstance(pt_list, list), 'Pt list {0} shall be a list but not a {1}'.format(pt_list, type(pt_list)) + if len(pt_list) == 0: + raise RuntimeWarning('Pt list cannot be empty.') merged_ws_name = '%s_Exp%d_Scan%d_Pt%d_%d_HKL_MD' % (instrument_name, exp_no, scan_no, pt_list[0], pt_list[-1]) @@ -780,3 +787,141 @@ def is_peak_nuclear(index_h, index_k, index_l, magnetic_tolerance=0.2): return False return True + + +def write_pre_process_record(file_name, record_dict): + """write the pre-processed record file + :param file_name: + :param record_dict: dictionary related to record + :return: + """ + # check input + assert isinstance(file_name, str), 'Record file name {0} must be a string but not a {1}.' \ + ''.format(file_name, type(file_name)) + assert isinstance(record_dict, dict), 'One entry of record {0} must be given in a dictionary but not a {1}.' 
\ + ''.format(record_dict, type(record_dict)) + + # write record + is_new_file = not os.path.exists(file_name) + + with open(file_name, 'w') as csv_file: + field_names = record_dict.keys() + writer = csv.DictWriter(csv_file, fieldnames=field_names) + + # write header + if is_new_file: + writer.writeheader() + + # write row + writer.writerow(record_dict) + # END-WITH + + return + + +def pre_processed_file_name(exp_number, scan, output_dir): + """ + + :param exp_number: + :param scan: + :param output_dir: + :return: + """ + # check inputs + assert isinstance(exp_number, int), 'Experiment number must be an integer' + assert isinstance(scan, int), 'Scan number must be an integer' + assert output_dir is None or isinstance(output_dir, str), 'Output directory must be a None or a string.' + + md_file_name = 'Exp{0}_Scan{1}_MD.nxs'.format(exp_number, scan) + if output_dir is not None: + md_file_name = os.path.join(output_dir, md_file_name) + + return md_file_name + + +""" +NOTE +1. a CSV file in appending mode +2. file's name is standard and defined in fourcircile_utility +3. 
csv file contains: + Scan, MD file path, detector-sample distance, peak center pixel (int, int), wave length +""" + + +def pre_processed_record_file(exp_number, md_dir): + """ form the name of the pre-processed scans' record file + :param exp_number: + :param md_dir: + :return: + """ + # check + assert isinstance(exp_number, int), 'Experiment number must be an integer' + assert isinstance(md_dir, str), 'Target directory must be a string' + + record_file_name = os.path.join(md_dir, 'Exp{0}Record.txt'.format(exp_number)) + + return record_file_name + + +def pre_processed_record_header(): + """ give the header in pre-processed scan's record file in CSV format + :return: + """ + return ['Scan', 'MD', 'DetSampleDistance', 'Center', 'WaveLength'] + + +def pre_processed_record_make(scan_number, file_name, distance, center_x, center_y, wave_length): + """ make a pre-processed scan's entry in record file + :param scan_number: + :param file_name: + :param distance: + :param center_x: + :param center_y: + :param wave_length: + :return: a dictionary + """ + record = {'Scan': scan_number, + 'MD': file_name, + 'DetSampleDistance': distance, + 'Center': (center_x, center_y), + 'WaveLength': wave_length} + + return record + + +def read_pre_process_record(file_name): + """ Read a pre-processed scan record file + :param file_name: + :return: a dictionary + """ + # check input + assert isinstance(file_name, str), 'Record file name {0} must be a string but not a {1}.' 
\ + ''.format(file_name, type(file_name)) + if os.path.exists(file_name) is False: + raise RuntimeError('Pre-processed scan record file {0} does not exist.'.format(file_name)) + + # load file + scan_record_dict = dict() + with open(file_name, 'r') as csv_file: + reader = csv.DictReader(csv_file) + for row_dict in reader: + scan_number = int(row_dict['Scan']) + + if len(row_dict['DetSampleDistance']) > 0: + row_dict['DetSampleDistance'] = float(row_dict['DetSampleDistance']) + else: + row_dict['DetSampleDistance'] = None + + if len(row_dict['WaveLength']) > 0: + row_dict['WaveLength'] = float(row_dict['WaveLength']) + else: + row_dict['WaveLength'] = None + + center_str = row_dict['Center'].replace('(', '').replace(')', '').replace(',', ' ').strip() + tup_str = center_str.split() + row_dict['Center'] = int(tup_str[0]), int(tup_str[1]) + + scan_record_dict[scan_number] = row_dict + # END-WITH + + return scan_record_dict diff --git a/scripts/HFIR_4Circle_Reduction/httpserversetup.ui b/scripts/HFIR_4Circle_Reduction/httpserversetup.ui new file mode 100644 index 0000000000000000000000000000000000000000..484989587f027d4745033c94320bdaa02dac34c5 --- /dev/null +++ b/scripts/HFIR_4Circle_Reduction/httpserversetup.ui @@ -0,0 +1,285 @@ +<?xml version="1.0" encoding="UTF-8"?> +<ui version="4.0"> + <class>Dialog</class> + <widget class="QDialog" name="Dialog"> + <property name="geometry"> + <rect> + <x>0</x> + <y>0</y> + <width>677</width> + <height>304</height> + </rect> + </property> + <property name="windowTitle"> + <string>Dialog</string> + </property> + <layout class="QGridLayout" name="gridLayout_2"> + <item row="0" column="0"> + <widget class="QGroupBox" name="groupBox"> + <property name="title"> + <string>HTTP Sever Setup</string> + </property> + <layout class="QGridLayout" name="gridLayout"> + <item row="0" column="0"> + <widget class="QLabel" name="label_url"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> + 
<horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>140</width> + <height>0</height> + </size> + </property> + <property name="toolTip"> + <string><html><head/><body><p>URL of the http server to download HB3A data</p></body></html></string> + </property> + <property name="text"> + <string>HTTP Server URL</string> + </property> + </widget> + </item> + <item row="0" column="2"> + <widget class="QPushButton" name="pushButton_testURLs"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Preferred" vsizetype="Fixed"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>100</width> + <height>0</height> + </size> + </property> + <property name="maximumSize"> + <size> + <width>200</width> + <height>16777215</height> + </size> + </property> + <property name="text"> + <string>Test Set</string> + </property> + </widget> + </item> + <item row="0" column="1"> + <widget class="QLineEdit" name="lineEdit_url"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Expanding" vsizetype="Fixed"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + </widget> + </item> + <item row="0" column="3"> + <spacer name="horizontalSpacer"> + <property name="orientation"> + <enum>Qt::Horizontal</enum> + </property> + <property name="sizeType"> + <enum>QSizePolicy::Preferred</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>40</width> + <height>20</height> + </size> + </property> + </spacer> + </item> + </layout> + </widget> + </item> + <item row="1" column="0"> + <widget class="QGroupBox" name="groupBox_2"> + <property name="title"> + <string>Data Download</string> + </property> + <layout class="QGridLayout" name="gridLayout_12"> + <item row="0" column="1"> + <widget class="QLineEdit" name="lineEdit_localSrcDir"> + <property name="toolTip"> + 
<string><html><head/><body><p>Cache on local disk. The dowloaded data will be saved to here. </p></body></html></string> + </property> + </widget> + </item> + <item row="1" column="1"> + <widget class="QComboBox" name="comboBox_mode"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Expanding" vsizetype="Fixed"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="toolTip"> + <string><html><head/><body><p>Mode &quot;download&quot;: download data to local disk;</p><p>Mode &quot;http server only&quot;: download data to cache, process and delete cached data upon returning</p></body></html></string> + </property> + <item> + <property name="text"> + <string>Download Complete Experiment</string> + </property> + </item> + <item> + <property name="text"> + <string>Download Selected Scans</string> + </property> + </item> + </widget> + </item> + <item row="2" column="1"> + <widget class="QLineEdit" name="lineEdit_downloadScans"/> + </item> + <item row="0" column="2"> + <widget class="QPushButton" name="pushButton_browseLocalCache"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Fixed" vsizetype="Fixed"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>100</width> + <height>0</height> + </size> + </property> + <property name="maximumSize"> + <size> + <width>100</width> + <height>16777215</height> + </size> + </property> + <property name="text"> + <string>Browse</string> + </property> + </widget> + </item> + <item row="1" column="0"> + <widget class="QLabel" name="label_datamode"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>80</width> + <height>0</height> + </size> + </property> + <property name="text"> + <string>Download Mode</string> + 
</property> + </widget> + </item> + <item row="1" column="2"> + <widget class="QPushButton" name="pushButton_downloadExpData"> + <property name="font"> + <font> + <weight>75</weight> + <bold>true</bold> + </font> + </property> + <property name="toolTip"> + <string><html><head/><body><p><span style=" font-weight:400;">Download scans specified by 'Scans List'; </span></p><p><span style=" font-weight:400;">If 'Scans List' is empty, then the complete experiment data will be downloaded</span></p></body></html></string> + </property> + <property name="text"> + <string>Download</string> + </property> + </widget> + </item> + <item row="0" column="6"> + <spacer name="horizontalSpacer_2"> + <property name="orientation"> + <enum>Qt::Horizontal</enum> + </property> + <property name="sizeType"> + <enum>QSizePolicy::Minimum</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>40</width> + <height>20</height> + </size> + </property> + </spacer> + </item> + <item row="0" column="0"> + <widget class="QLabel" name="label_4"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + <width>140</width> + <height>0</height> + </size> + </property> + <property name="maximumSize"> + <size> + <width>140</width> + <height>16777215</height> + </size> + </property> + <property name="text"> + <string>Destination</string> + </property> + </widget> + </item> + <item row="2" column="2"> + <widget class="QPushButton" name="pushButton_ListScans"> + <property name="text"> + <string>List Scans</string> + </property> + </widget> + </item> + <item row="2" column="0"> + <widget class="QLabel" name="label"> + <property name="sizePolicy"> + <sizepolicy hsizetype="Fixed" vsizetype="Preferred"> + <horstretch>0</horstretch> + <verstretch>0</verstretch> + </sizepolicy> + </property> + <property name="minimumSize"> + <size> + 
<width>80</width> + <height>0</height> + </size> + </property> + <property name="text"> + <string>Scans List</string> + </property> + </widget> + </item> + <item row="3" column="1"> + <spacer name="verticalSpacer"> + <property name="orientation"> + <enum>Qt::Vertical</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>20</width> + <height>40</height> + </size> + </property> + </spacer> + </item> + </layout> + </widget> + </item> + </layout> + </widget> + <resources/> + <connections/> +</ui> diff --git a/scripts/HFIR_4Circle_Reduction/multi_threads_helpers.py b/scripts/HFIR_4Circle_Reduction/multi_threads_helpers.py index dd386ed22f7f0647d81aa42e79d0f19b3eecdf20..3b99b54cc3fd2d5767eec72282a0f14dad6a6981 100644 --- a/scripts/HFIR_4Circle_Reduction/multi_threads_helpers.py +++ b/scripts/HFIR_4Circle_Reduction/multi_threads_helpers.py @@ -1,5 +1,6 @@ #pylint: disable=W0403,R0913,R0902 from __future__ import (absolute_import, division, print_function) +import os from PyQt4 import QtCore from PyQt4.QtCore import QThread @@ -70,7 +71,7 @@ class AddPeaksThread(QThread): # merge peak status, err_msg = self._mainWindow.controller.merge_pts_in_scan( - self._expNumber, scan_number, []) + self._expNumber, scan_number, [], False, self._mainWindow.controller.pre_processed_dir) # continue to the next scan if there is something wrong if status is False: @@ -160,6 +161,9 @@ class IntegratePeaksThread(QThread): self._numBgPtRight = num_pt_bg_right self._scaleFactor = scale_factor + # other about preprocessed options + self._checkPreprocessedScans = True + # link signals self.peakMergeSignal.connect(self._mainWindow.update_merge_value) self.mergeMsgSignal.connect(self._mainWindow.update_merge_message) @@ -193,9 +197,14 @@ class IntegratePeaksThread(QThread): if merged is False: merged_ws_name = 'X' try: - status, ret_tup = self._mainWindow.controller.merge_pts_in_scan(exp_no=self._expNumber, - scan_no=scan_number, - pt_num_list=pt_number_list) + pre_dir = 
self._mainWindow.controller.pre_processed_dir + status, ret_tup = \ + self._mainWindow.controller.merge_pts_in_scan(exp_no=self._expNumber, + scan_no=scan_number, + pt_num_list=pt_number_list, + rewrite=False, + preprocessed_dir=pre_dir) + if status: merged_ws_name = str(ret_tup[0]) error_message = '' @@ -380,6 +389,11 @@ class MergePeaksThread(QThread): if md_file_list is not None: self._outputMDFileList = md_file_list[:] + # other about preprocessed options + self._checkPreprocessedScans = False + self._preProcessedDir = None + self._redoMerge = True + # link signals self.mergeMsgSignal.connect(self._mainWindow.update_merge_value) self.saveMsgSignal.connect(self._mainWindow.update_file_name) @@ -420,7 +434,9 @@ class MergePeaksThread(QThread): try: status, ret_tup = self._mainWindow.controller.merge_pts_in_scan(exp_no=self._expNumber, scan_no=scan_number, - pt_num_list=pt_number_list) + pt_num_list=pt_number_list, + rewrite=self._redoMerge, + preprocessed_dir=self._preProcessedDir) if status: merged_ws_name = str(ret_tup[0]) error_message = '' @@ -452,7 +468,42 @@ class MergePeaksThread(QThread): # merging error self.mergeMsgSignal.emit(scan_number, error_message) continue - # self._mainWindow.ui.tableWidget_mergeScans.set_status(scan_number, 'Merged') # END-IF return + + def set_pre_process_options(self, option_to_use, pre_process_dir): + """ + set the pre-process options + :param option_to_use: + :param pre_process_dir: + :return: + """ + # check + assert isinstance(option_to_use, bool), 'Option to use pre-process must be a boolean but not a {0}.' 
\ + ''.format(type(option_to_use)) + + self._checkPreprocessedScans = option_to_use + + if self._checkPreprocessedScans: + assert isinstance(pre_process_dir, str), 'Directory {0} to store preprocessed data must be a string ' \ + 'but not a {1).'.format(pre_process_dir, type(pre_process_dir)) + if os.path.exists(pre_process_dir) is False: + raise RuntimeError('Directory {0} does not exist.'.format(pre_process_dir)) + self._preProcessedDir = pre_process_dir + # END-IF + + return + + def set_rewrite(self, flag): + """ + set the flag to re-merge the scan regardless whether the target workspace is in memory + or a pre-processed MD workspace does exist. + :param flag: + :return: + """ + assert isinstance(flag, bool), 'Re-merge/re-write flag must be a boolean but not a {0}'.format(type(flag)) + + self._redoMerge = flag + + return diff --git a/scripts/HFIR_4Circle_Reduction/peakprocesshelper.py b/scripts/HFIR_4Circle_Reduction/peakprocesshelper.py index a69a2c19c6e8cd9b97ad3cfe677bf49682356d4f..280e56846ebf41f86c501b591a320d97d9c37df9 100644 --- a/scripts/HFIR_4Circle_Reduction/peakprocesshelper.py +++ b/scripts/HFIR_4Circle_Reduction/peakprocesshelper.py @@ -1,6 +1,7 @@ #pylint: disable=W0403,R0902 from __future__ import (absolute_import, division, print_function) from six.moves import range +import numpy import time import random from HFIR_4Circle_Reduction.fourcircle_utility import * diff --git a/scripts/HFIR_4Circle_Reduction/pre_process_table.py b/scripts/HFIR_4Circle_Reduction/pre_process_table.py index 3407042c410c63d75810f3312f69d233dde3411c..0ce5680b767a7175f32a654c65f04a9a6e813f8c 100644 --- a/scripts/HFIR_4Circle_Reduction/pre_process_table.py +++ b/scripts/HFIR_4Circle_Reduction/pre_process_table.py @@ -50,14 +50,14 @@ class ScanPreProcessStatusTable(NTableWidget.NTableWidget): return - def add_new_scans(self, scan_numbers, append): + def add_new_scans(self, scan_numbers): """ add scans to the :param scan_numbers: :return: """ # check input - assert 
isinstance(scan_numbers, list), 'blabla' + assert isinstance(scan_numbers, list), 'Scan numbers must be given in a list.' # sort scan_numbers.sort() @@ -70,7 +70,6 @@ class ScanPreProcessStatusTable(NTableWidget.NTableWidget): continue # append scan - print '[DB...BAT] Append row for scan {0}'.format(scan_number) status, msg = self.append_row([scan_number, '', '', '']) if not status: raise RuntimeError('Failed to append a new row due to {0}'.format(msg)) @@ -98,7 +97,11 @@ class ScanPreProcessStatusTable(NTableWidget.NTableWidget): :param status: :return: """ - # check ... blabla + # check inputs + assert isinstance(row_number, int), 'Row number {0} must be an integer.'.format(row_number) + status = str(status) + if not isinstance(status, str): + print ('[DB] status is an instance of {0}.'.format(type(status))) self.update_cell_value(row_number, self._iColStatus, status) diff --git a/scripts/HFIR_4Circle_Reduction/reduce4circleControl.py b/scripts/HFIR_4Circle_Reduction/reduce4circleControl.py index db2563a436ea0d247e8c5fc61ffd1c39eaf5203b..4e86b2bffbcf1b612332dac5e95563cc495a027e 100644 --- a/scripts/HFIR_4Circle_Reduction/reduce4circleControl.py +++ b/scripts/HFIR_4Circle_Reduction/reduce4circleControl.py @@ -9,12 +9,22 @@ # ################################################################################ from __future__ import (absolute_import, division, print_function) +try: + # python3 + from urllib.request import urlopen + from urllib.error import HTTPError + from urllib.error import URLError +except ImportError: + from urllib2 import urlopen + from urllib2 import HTTPError + from urllib2 import URLError from six.moves import range import csv import random import os from HFIR_4Circle_Reduction.fourcircle_utility import * +import HFIR_4Circle_Reduction.fourcircle_utility as fourcircle_utility from HFIR_4Circle_Reduction.peakprocesshelper import PeakProcessRecord from HFIR_4Circle_Reduction import fputility from HFIR_4Circle_Reduction import project_manager @@ 
-55,6 +65,9 @@ class CWSCDReductionControl(object): self._dataDir = None self._workDir = '/tmp' + self._preprocessedDir = None + # dictionary for pre-processed scans. key = scan number, value = dictionary for all kinds of information + self._preprocessedInfoDict = None self._myServerURL = '' @@ -97,7 +110,6 @@ class CWSCDReductionControl(object): # Record for merged scans self._mergedWSManager = list() - # Region of interest: key = (experiment, scan), value = 2-tuple of 2-tuple: ( (lx, ly), (ux, uy)) self._roiDict = dict() @@ -123,6 +135,39 @@ class CWSCDReductionControl(object): return + @property + def pre_processed_dir(self): + """ + get the pre-processed directory + :return: + """ + return self._preprocessedDir + + @pre_processed_dir.setter + def pre_processed_dir(self, dir_name): + """ + setting pre-processed directory + :param dir_name: + :return: + """ + # check + assert isinstance(dir_name, str) or dir_name is None, 'Directory {0} must be None or string.'.format(dir_name) + + if os.path.exists(dir_name) is False: + raise RuntimeError('Pre-processed scans directory {0} does not exist!'.format(dir_name)) + + # set + self._preprocessedDir = dir_name + + # load pre-processed scans' record file if possible + if self._expNumber is None: + raise RuntimeError('Experiment number {0} must be set up before pre-processesd scan directory is set.') + record_file_name = fourcircle_utility.pre_processed_record_file(self._expNumber, self._preprocessedDir) + if os.path.exists(record_file_name): + self._preprocessedInfoDict = fourcircle_utility.read_pre_process_record(record_file_name) + + return + def _add_merged_ws(self, exp_number, scan_number, pt_number_list): """ Record a merged workspace to Requirements: experiment number, scan number and pt numbers are valid @@ -555,6 +600,20 @@ class CWSCDReductionControl(object): return self._myUBMatrixDict[exp_number] + def get_calibrated_wave_length(self, exp_number): + """ Get the user specified (i.e., calibrated) wave length for 
a specific experiment + :param exp_number: + :return: + """ + # check inputs + assert isinstance(exp_number, int), 'Experiment numbe {0} must be an integer but not a {1}' \ + ''.format(exp_number, type(exp_number)) + + if exp_number not in self._userWavelengthDict: + return None + + return self._userWavelengthDict[exp_number] + def get_wave_length(self, exp_number, scan_number_list): """ Get the wavelength. @@ -1587,26 +1646,109 @@ class CWSCDReductionControl(object): return binning_script - def merge_pts_in_scan(self, exp_no, scan_no, pt_num_list): + def is_calibration_match(self, exp_number, scan_number): """ - Merge Pts in Scan - All the workspaces generated as internal results will be grouped - Requirements: - 1. target_frame must be either 'q-sample' or 'hkl' - 2. pt_list must be a list. an empty list means to merge all Pts. in the scan - Guarantees: An MDEventWorkspace is created containing merged Pts. + check whether the pre-processed data has a set of matching calibrated parameters comparing to + the current one + :param exp_number: + :param scan_number: + :return: + """ + # no record is found. it should not happen! + if scan_number not in self._preprocessedInfoDict: + print ('[DB...BAT] Scan {0} is not in pre-processed scan information dictionary. 
keys are ' + '{1}'.format(scan_number, self._preprocessedInfoDict.keys())) + return False + + # check others + unmatch_score = 0 + + # center + center_x, center_y = self.get_calibrated_det_center(exp_number) + if (center_x, center_y) != self._preprocessedInfoDict[scan_number]['Center']: + unmatch_score += 2 + + # wave length + wavelength = self.get_calibrated_wave_length(exp_number) + record_lambda = self._preprocessedInfoDict[scan_number]['WaveLength'] + if type(record_lambda) != type(wavelength): + unmatch_score += 20 + elif wavelength is not None and abs(wavelength - record_lambda) > 1.E-5: + unmatch_score += 40 + + # detector distance + det_sample_distance = self.get_calibrated_det_sample_distance(exp_number) + record_distance = self._preprocessedInfoDict[scan_number]['DetSampleDistance'] + if type(det_sample_distance) != type(record_distance): + unmatch_score += 200 + elif det_sample_distance is not None and abs(det_sample_distance - record_distance) > 1.E-5: + unmatch_score += 400 + + if unmatch_score > 0: + print('[INFO] Exp {0} Scan {1} has a unmatched calibrated record from pre-processed data. ID = {2}' + ''.format(exp_number, scan_number, unmatch_score)) + return False + + print('[INFO] Exp {0} Scan {1} has a matched calibrated record from pre-processed data.') + + return True + + def load_preprocessed_scan(self, exp_number, scan_number, md_dir, output_ws_name): + """ load preprocessed scan from hard disk + :return: + """ + # check inputs + assert isinstance(exp_number, int), 'Experiment number {0} ({1}) must be an integer' \ + ''.format(exp_number, type(exp_number)) + assert isinstance(scan_number, int), 'Scan number {0} ({1}) must be an integer.' \ + ''.format(scan_number, type(scan_number)) + assert isinstance(md_dir, str), 'MD file directory {0} ({1}) must be a string.' \ + ''.format(md_dir, type(md_dir)) + assert isinstance(output_ws_name, str), 'Output workspace name {0} ({1}) must be a string.' 
\ + ''.format(output_ws_name, type(output_ws_name)) + + if os.path.exists(md_dir) is False: + raise RuntimeError('Pre-processed directory {0} does not exist.'.format(md_dir)) + + # ws_name = 'Exp{0}_Scan{1}_MD'.format(exp_number, scan_number) + # md_file_path = os.path.join(md_dir, ws_name + '.nxs') + + # 2-ways to get file name + if self._preprocessedInfoDict is None or scan_number not in self._preprocessedInfoDict: + md_file_path = fourcircle_utility.pre_processed_file_name(exp_number, scan_number, md_dir) + else: + md_file_path = self._preprocessedInfoDict[scan_number]['MD'] + + # check + if os.path.exists(md_file_path) is False: + print ('[WARNING] MD file {0} does not exist.'.format(md_file_path)) + return False + + # load and check + status = False + try: + # load + mantidsimple.LoadMD(Filename=md_file_path, OutputWorkspace=output_ws_name) + # check + status = AnalysisDataService.doesExist(output_ws_name) + print ('[INFO] {0} is loaded from {1} with status {2}' + ''.format(output_ws_name, md_file_path, status)) + except RuntimeError as run_err: + print('[DB] Unable to load file {0} due to RuntimeError {1}.'.format(md_file_path, run_err)) + except OSError as run_err: + print('[DB] Unable to load file {0} due to OSError {1}.'.format(md_file_path, run_err)) + except IOError as run_err: + print('[DB] Unable to load file {0} due to IOError {1}.'.format(md_file_path, run_err)) + + return status + + def _process_pt_list(self, exp_no, scan_no, pt_num_list): + """ + convert list of Pt (in int) to a string like a list of integer :param exp_no: :param scan_no: - :param pt_num_list: If empty, then merge all Pt. in the scan - :return: (boolean, error message) # (merged workspace name, workspace group name) + :return: """ - # Check - if exp_no is None: - exp_no = self._expNumber - assert isinstance(exp_no, int) and isinstance(scan_no, int) - assert isinstance(pt_num_list, list), 'Pt number list must be a list but not %s' % str(type(pt_num_list)) - - # Get list of Pt. 
if len(pt_num_list) > 0: # user specified pt_num_list = pt_num_list @@ -1633,9 +1775,93 @@ class CWSCDReductionControl(object): if pt_list_str == '-1': return False, err_msg + return True, (pt_num_list, pt_list_str) + + def merge_pts_in_scan(self, exp_no, scan_no, pt_num_list, rewrite, preprocessed_dir): + """ + Merge Pts in Scan + All the workspaces generated as internal results will be grouped + Requirements: + 1. target_frame must be either 'q-sample' or 'hkl' + 2. pt_list must be a list. an empty list means to merge all Pts. in the scan + Guarantees: An MDEventWorkspace is created containing merged Pts. + :param exp_no: + :param scan_no: + :param pt_num_list: If empty, then merge all Pt. in the scan + :param rewrite: if True, then the data will be re-merged regardless workspace exists or not + :param preprocessed_dir: If None, then merge Pts. Otherwise, try to search and load preprocessed data first + :return: (boolean, error message) # (merged workspace name, workspace group name) + """ + # Check + if exp_no is None: + exp_no = self._expNumber + assert isinstance(exp_no, int) and isinstance(scan_no, int) + assert isinstance(pt_num_list, list), 'Pt number list must be a list but not %s' % str(type(pt_num_list)) + + # Get list of Pt. + status, ret_obj = self._process_pt_list(exp_no, scan_no, pt_num_list) + if not status: + error_msg = ret_obj + return False, error_msg + pt_num_list, pt_list_str = ret_obj + # if len(pt_num_list) > 0: + # # user specified + # pt_num_list = pt_num_list + # else: + # # default: all Pt. 
of scan + # status, pt_num_list = self.get_pt_numbers(exp_no, scan_no) + # if status is False: + # err_msg = pt_num_list + # return False, err_msg + # # END-IF-ELSE + # + # # construct a list of Pt as the input of CollectHB3AExperimentInfo + # pt_list_str = '-1' # header + # err_msg = '' + # for pt in pt_num_list: + # # Download file + # try: + # self.download_spice_xml_file(scan_no, pt, exp_no=exp_no, overwrite=False) + # except RuntimeError as e: + # err_msg += 'Unable to download xml file for pt %d due to %s\n' % (pt, str(e)) + # continue + # pt_list_str += ',%d' % pt + # # END-FOR (pt) + # if pt_list_str == '-1': + # return False, err_msg + # create output workspace's name out_q_name = get_merged_md_name(self._instrumentName, exp_no, scan_no, pt_num_list) - if AnalysisDataService.doesExist(out_q_name) is False: + + # find out the cases that rewriting is True + print ('[DB...BAT] Rewrite = {0}'.format(rewrite)) + + if not rewrite: + print ('[DB...BAT] pre-processed dir: {0}'.format(preprocessed_dir)) + + if AnalysisDataService.doesExist(out_q_name): + # not re-write, target workspace exists + pass + elif preprocessed_dir is not None: + # not re-write, target workspace does not exist, attempt to load from preprocessed + if self.is_calibration_match(exp_no, scan_no): + data_loaded = self.load_preprocessed_scan(exp_number=exp_no, + scan_number=scan_no, + md_dir=preprocessed_dir, + output_ws_name=out_q_name) + rewrite = not data_loaded + else: + rewrite = True + else: + print ('[WARNING] Target MDWorkspace does not exist. And preprocessed directory is not given ' + '. Why re-write flag is turned off in the first place?') + rewrite = True + # END-IF (ADS) + # END-IF (rewrite) + + # now to load the data + # check whether it is an option load preprocessed (merged) data + if rewrite: # collect HB3A Exp/Scan information # - construct a configuration with 1 scan and multiple Pts. 
scan_info_table_name = get_merge_pt_info_ws_name(exp_no, scan_no) @@ -1798,6 +2024,21 @@ class CWSCDReductionControl(object): return + def get_calibrated_det_center(self, exp_number): + """ + get calibrated/user-specified detector center or the default center + :param exp_number: + :return: 2-tuple (int, int) as pixel ID in X and Y directory + """ + # check inputs + assert isinstance(exp_number, int), 'Experiment number {0} ({1}) must be an integer.' \ + ''.format(exp_number, type(exp_number)) + + if exp_number not in self._detCenterDict: + return self._defaultDetectorCenter + + return self._detCenterDict[exp_number] + def set_detector_center(self, exp_number, center_row, center_col, default=False): """ Set detector center @@ -1817,9 +2058,9 @@ class CWSCDReductionControl(object): ''.format(center_col, type(center_col)) if default: - self._defaultDetectorCenter = (center_row, center_col) + self._defaultDetectorCenter = center_row, center_col else: - self._detCenterDict[exp_number] = (center_row, center_col) + self._detCenterDict[exp_number] = center_row, center_col return @@ -1841,6 +2082,20 @@ class CWSCDReductionControl(object): return + def get_calibrated_det_sample_distance(self, exp_number): + """ + + :param exp_number: + :return: + """ + # check inputs + assert isinstance(exp_number, int) and exp_number > 0, 'Experiment number must be integer' + + if exp_number not in self._detSampleDistanceDict: + return None + + return self._detSampleDistanceDict[exp_number] + def set_detector_sample_distance(self, exp_number, sample_det_distance): """ set instrument's detector - sample distance @@ -1915,10 +2170,10 @@ class CWSCDReductionControl(object): is_url_good = False error_message = None try: - result = urllib2.urlopen(self._myServerURL) - except urllib2.HTTPError as err: + result = urlopen(self._myServerURL) + except HTTPError as err: error_message = str(err.code) - except urllib2.URLError as err: + except URLError as err: error_message = str(err.args) else: 
is_url_good = True @@ -2286,7 +2541,7 @@ class CWSCDReductionControl(object): :return: """ # check - assert isinstance(tag, str) + assert isinstance(tag, str), 'Tag must be a string' assert len(region_of_interest) == 2 assert len(region_of_interest[0]) == 2 assert len(region_of_interest[1]) == 2 diff --git a/scripts/HFIR_4Circle_Reduction/reduce4circleGUI.py b/scripts/HFIR_4Circle_Reduction/reduce4circleGUI.py index 3bdf52068ccef9520b3fd5913449786695ce348a..da275cd257860a593464a12e63b11551fb3f0a1c 100644 --- a/scripts/HFIR_4Circle_Reduction/reduce4circleGUI.py +++ b/scripts/HFIR_4Circle_Reduction/reduce4circleGUI.py @@ -25,6 +25,7 @@ from HFIR_4Circle_Reduction import peak_integration_utility from HFIR_4Circle_Reduction import FindUBUtility from HFIR_4Circle_Reduction import message_dialog from HFIR_4Circle_Reduction import PreprocessWindow +from HFIR_4Circle_Reduction.downloaddialog import DataDownloadDialog # import line for the UI python class from HFIR_4Circle_Reduction.ui_MainWindow import Ui_MainWindow @@ -81,6 +82,7 @@ class MainWindow(QtGui.QMainWindow): self._mySinglePeakIntegrationDialog = None self._preProcessWindow = None self._singlePeakIntegrationDialogBuffer = '' + self._dataDownloadDialog = None # Make UI scrollable if NO_SCROLL is False: @@ -108,14 +110,6 @@ class MainWindow(QtGui.QMainWindow): self.do_apply_setup) self.connect(self.ui.pushButton_browseLocalDataDir, QtCore.SIGNAL('clicked()'), self.do_browse_local_spice_data) - self.connect(self.ui.pushButton_testURLs, QtCore.SIGNAL('clicked()'), - self.do_test_url) - self.connect(self.ui.pushButton_ListScans, QtCore.SIGNAL('clicked()'), - self.do_list_scans) - self.connect(self.ui.pushButton_downloadExpData, QtCore.SIGNAL('clicked()'), - self.do_download_spice_data) - self.connect(self.ui.comboBox_mode, QtCore.SIGNAL('currentIndexChanged(int)'), - self.do_change_data_access_mode) self.connect(self.ui.pushButton_applyCalibratedSampleDistance, QtCore.SIGNAL('clicked()'), 
self.do_set_user_detector_distance) self.connect(self.ui.pushButton_applyUserDetCenter, QtCore.SIGNAL('clicked()'), @@ -230,14 +224,12 @@ class MainWindow(QtGui.QMainWindow): self.do_select_all_peaks) # Tab 'Setup' - self.connect(self.ui.pushButton_useDefaultDir, QtCore.SIGNAL('clicked()'), - self.do_setup_dir_default) - self.connect(self.ui.pushButton_browseLocalCache, QtCore.SIGNAL('clicked()'), - self.do_browse_local_cache_dir) self.connect(self.ui.pushButton_browseWorkDir, QtCore.SIGNAL('clicked()'), self.do_browse_working_dir) self.connect(self.ui.comboBox_instrument, QtCore.SIGNAL('currentIndexChanged(int)'), self.do_change_instrument_name) + self.connect(self.ui.pushButton_browsePreprocessed, QtCore.SIGNAL('clicked()'), + self.do_browse_preprocessed_dir) # Tab 'UB Matrix' self.connect(self.ui.pushButton_showUB2Edit, QtCore.SIGNAL('clicked()'), @@ -321,6 +313,10 @@ class MainWindow(QtGui.QMainWindow): self.connect(self.ui.actionPre_Processing, QtCore.SIGNAL('triggered()'), self.menu_pre_process) + # menu + self.connect(self.ui.actionData_Downloading, QtCore.SIGNAL('triggered()'), + self.menu_download_data) + # Validator ... 
(NEXT) # Declaration of class variable @@ -423,8 +419,8 @@ class MainWindow(QtGui.QMainWindow): self.ui.radioButton_ubSelectNoScan.setChecked(True) # Tab 'Access' - self.ui.lineEdit_url.setText('http://neutron.ornl.gov/user_data/hb3a/') - self.ui.comboBox_mode.setCurrentIndex(0) + # self.ui.lineEdit_url.setText('http://neutron.ornl.gov/user_data/hb3a/') + # self.ui.comboBox_mode.setCurrentIndex(0) self.ui.lineEdit_localSpiceDir.setEnabled(True) self.ui.pushButton_browseLocalDataDir.setEnabled(True) @@ -438,6 +434,9 @@ class MainWindow(QtGui.QMainWindow): # background points self.ui.lineEdit_backgroundPts.setText('1, 1') + # about pre-processed data + self.ui.checkBox_searchPreprocessedFirst.setChecked(True) + return def _build_peak_info_list(self, zero_hkl, is_spice=True): @@ -563,9 +562,10 @@ class MainWindow(QtGui.QMainWindow): self._myControl.save_project(project_file_name, ui_dict) - # TODO/NOW/TODAY - Implement a pop-up dialog for this + # show user the message that the saving process is over information = 'Project has been saved to {0}\n'.format(project_file_name), information += 'Including dictionary keys: {0}'.format(ui_dict) + self.pop_one_button_dialog(information) print('[INFO]\n{0}'.format(information)) return @@ -751,7 +751,7 @@ class MainWindow(QtGui.QMainWindow): # """ Add current to ub peaks # :return: # """ - # # TODO/FIXME/ISSUE/NOW - Find out whether this method is still needed + # # TODO//ISSUE/Future - Find out whether this method is still needed # # Add peak # status, int_list = gutil.parse_integers_editors([self.ui.lineEdit_exp, # self.ui.lineEdit_scanNumber]) @@ -880,14 +880,13 @@ class MainWindow(QtGui.QMainWindow): # get data directory, working directory and data server URL from GUI local_data_dir = str(self.ui.lineEdit_localSpiceDir.text()).strip() working_dir = str(self.ui.lineEdit_workDir.text()).strip() - data_server = str(self.ui.lineEdit_url.text()).strip() + pre_process_dir = str(self.ui.lineEdit_preprocessedDir.text()).strip() # 
set to my controller status, err_msg = self._myControl.set_local_data_dir(local_data_dir) if not status: raise RuntimeError(err_msg) self._myControl.set_working_directory(working_dir) - self._myControl.set_server_url(data_server, check_link=False) # check error_message = '' @@ -923,35 +922,40 @@ class MainWindow(QtGui.QMainWindow): self.ui.lineEdit_workDir.setStyleSheet("color: green;") # END-IF-ELSE - # Set the URL red as it is better not check at this stage. Leave it to user - self.ui.lineEdit_url.setStyleSheet("color: black;") + # preprocess directory + if len(pre_process_dir) == 0: + # user does not specify + self._myControl.pre_processed_dir = None + elif os.path.exists(pre_process_dir): + # user specifies a valid directory + self._myControl.pre_processed_dir = pre_process_dir + self.ui.lineEdit_preprocessedDir.setStyleSheet('color: green;') + else: + # user specifies a non-exist directory. make an error message + self.pop_one_button_dialog('Pre-processed directory {0} ({1}) does not exist.' 
+ ''.format(pre_process_dir, type(pre_process_dir))) + self._myControl.pre_processed_dir = None + self.ui.lineEdit_preprocessedDir.setStyleSheet('color: red;') + # END-IF if len(error_message) > 0: self.pop_one_button_dialog(error_message) return - def do_browse_local_cache_dir(self): - """ Browse local cache directory + def do_browse_preprocessed_dir(self): + """ browse the pre-processed merged scans' directory :return: """ - local_cache_dir = str(QtGui.QFileDialog.getExistingDirectory(self, - 'Get Local Cache Directory', - self._homeSrcDir)) - - # Set local directory to control - status, error_message = self._myControl.set_local_data_dir(local_cache_dir) - if status is False: - self.pop_one_button_dialog(error_message) - return + # determine default directory + exp_number_str = str(self.ui.lineEdit_exp.text()) + default_pp_dir = os.path.join('/HFIR/HB3A/exp{0}/Shared/'.format(exp_number_str)) + if not os.path.exists(default_pp_dir): + default_pp_dir = os.path.expanduser('~') - # Synchronize to local data/spice directory and local cache directory - if str(self.ui.lineEdit_localSpiceDir.text()) != '': - prev_dir = str(self.ui.lineEdit_localSrcDir.text()) - self.pop_one_button_dialog('Local data directory was set up as %s' % - prev_dir) - self.ui.lineEdit_localSrcDir.setText(local_cache_dir) - self.ui.lineEdit_localSpiceDir.setText(local_cache_dir) + # use FileDialog to get the directory and set to preprocessedDir + pp_dir = str(QtGui.QFileDialog.getExistingDirectory(self, 'Get Directory', default_pp_dir)) + self.ui.lineEdit_preprocessedDir.setText(pp_dir) return @@ -1026,31 +1030,6 @@ class MainWindow(QtGui.QMainWindow): return - def do_change_data_access_mode(self): - """ Change data access mode between downloading from server and local - Event handling methods - :return: - """ - new_mode = str(self.ui.comboBox_mode.currentText()) - self._dataAccessMode = new_mode - - if new_mode.startswith('Local') is True: - self.ui.lineEdit_localSpiceDir.setEnabled(True) - 
self.ui.pushButton_browseLocalDataDir.setEnabled(True) - self.ui.lineEdit_url.setEnabled(False) - self.ui.lineEdit_localSrcDir.setEnabled(False) - self.ui.pushButton_browseLocalCache.setEnabled(False) - self._allowDownload = False - else: - self.ui.lineEdit_localSpiceDir.setEnabled(False) - self.ui.pushButton_browseLocalDataDir.setEnabled(False) - self.ui.lineEdit_url.setEnabled(True) - self.ui.lineEdit_localSrcDir.setEnabled(True) - self.ui.pushButton_browseLocalCache.setEnabled(True) - self._allowDownload = True - - return - def do_change_instrument_name(self): """ Handing the event as the instrument name is changed :return: @@ -1167,54 +1146,7 @@ class MainWindow(QtGui.QMainWindow): return - def do_download_spice_data(self): - """ Download SPICE data - :return: - """ - # Check scans to download - scan_list_str = str(self.ui.lineEdit_downloadScans.text()) - if len(scan_list_str) > 0: - # user specifies scans to download - valid, scan_list = hb3a_util.parse_int_array(scan_list_str) - if valid is False: - error_message = scan_list - self.pop_one_button_dialog(error_message) - else: - # Get all scans - status, ret_obj = gutil.parse_integers_editors([self.ui.lineEdit_exp]) - if status is False: - self.pop_one_button_dialog(ret_obj) - return - exp_no = ret_obj - assert isinstance(exp_no, int) - server_url = str(self.ui.lineEdit_url.text()) - scan_list = hb3a_util.get_scans_list(server_url, exp_no, return_list=True) - self.pop_one_button_dialog('Going to download scans %s.' % str(scan_list)) - - # Check location - destination_dir = str(self.ui.lineEdit_localSrcDir.text()) - status, error_message = self._myControl.set_local_data_dir(destination_dir) - if status is False: - self.pop_one_button_dialog(error_message) - else: - self.pop_one_button_dialog('Spice files will be downloaded to %s.' 
% destination_dir) - - # Set up myControl for downloading data - exp_no = int(self.ui.lineEdit_exp.text()) - self._myControl.set_exp_number(exp_no) - - server_url = str(self.ui.lineEdit_url.text()) - status, error_message = self._myControl.set_server_url(server_url) - if status is False: - self.pop_one_button_dialog(error_message) - return - - # Download - self._myControl.download_data_set(scan_list) - - return - - def find_peak_in_scan(self , scan_number, load_spice_hkl): + def find_peak_in_scan(self, scan_number, load_spice_hkl): """ Find peak in a given scan and record it """ # Get experiment, scan and pt @@ -1227,7 +1159,8 @@ class MainWindow(QtGui.QMainWindow): # merge peak if necessary if self._myControl.has_merged_data(exp_no, scan_number) is False: - status, err_msg = self._myControl.merge_pts_in_scan(exp_no, scan_number, []) + status, err_msg = self._myControl.merge_pts_in_scan(exp_no, scan_number, [], rewrite=True, + preprocessed_dir=self._myControl.pre_processed_dir) if status is False: self.pop_one_button_dialog(err_msg) @@ -1680,25 +1613,6 @@ class MainWindow(QtGui.QMainWindow): return - def do_list_scans(self): - """ List all scans available - :return: - """ - # Experiment number - exp_no = int(self.ui.lineEdit_exp.text()) - - access_mode = str(self.ui.comboBox_mode.currentText()) - if access_mode == 'Local': - spice_dir = str(self.ui.lineEdit_localSpiceDir.text()) - message = hb3a_util.get_scans_list_local_disk(spice_dir, exp_no) - else: - url = str(self.ui.lineEdit_url.text()) - message = hb3a_util.get_scans_list(url, exp_no) - - self.pop_one_button_dialog(message) - - return - def do_load_scan_info(self): """ Load SIICE's scan file :return: @@ -2041,7 +1955,8 @@ class MainWindow(QtGui.QMainWindow): self.ui.tableWidget_mergeScans.set_status(row_number, 'In Processing') status, ret_tup = self._myControl.merge_pts_in_scan(exp_no=exp_number, scan_no=scan_number, - pt_num_list=[]) + pt_num_list=[], rewrite=False, + 
preprocessed_dir=self._myControl.pre_processed_dir) # find peaks too status, ret_obj = self._myControl.find_peak(exp_number, scan_number) @@ -2197,8 +2112,10 @@ class MainWindow(QtGui.QMainWindow): self.ui.lineEdit_infoDetCenter.setText('{0}, {1}'.format(center_row, center_col)) self._myControl.set_detector_center(exp_number, center_row, center_col) - # TODO/ISSUE/NOW/TODAY - Shall pop out a dialog to notify the completion - print('[INFO] Project from file {0} is loaded.'.format(project_file_name)) + # pop out a dialog to notify the completion + message = 'Project from file {0} is loaded.'.format(project_file_name) + self.pop_one_button_dialog(message) + print('[INFO] {0}'.format(message)) return @@ -3111,21 +3028,6 @@ class MainWindow(QtGui.QMainWindow): return - def do_test_url(self): - """ Test whether the root URL provided specified is good - """ - url = str(self.ui.lineEdit_url.text()) - - url_is_good, err_msg = hb3a_util.check_url(url) - if url_is_good is True: - self.pop_one_button_dialog("URL %s is valid." % url) - self.ui.lineEdit_url.setStyleSheet("color: green;") - else: - self.pop_one_button_dialog(err_msg) - self.ui.lineEdit_url.setStyleSheet("color: read;") - - return url_is_good - def do_view_data_set_3d(self): """ Launch the sub window to view merged data in 3D. 
@@ -3482,6 +3384,23 @@ class MainWindow(QtGui.QMainWindow): return + def menu_download_data(self): + """ launch a dialog for user to download data + :return: + """ + # create the dialog instance if it is not created + if self._dataDownloadDialog is None: + self._dataDownloadDialog = DataDownloadDialog(self) + + # set the experiment number + exp_number = int(self.ui.lineEdit_exp.text()) + self._dataDownloadDialog.set_experiment_number(exp_number) + + # show the dialog + self._dataDownloadDialog.show() + + return + def menu_quit(self): """ @@ -3491,8 +3410,7 @@ class MainWindow(QtGui.QMainWindow): self.close() def menu_pre_process(self): - """ - blabla + """ handling action to trigger menu pre-process :return: """ # initialize the pre processing window if it is not initialized @@ -3507,6 +3425,8 @@ class MainWindow(QtGui.QMainWindow): self._preProcessWindow.show() # setup the parameters + # TODO/FUTURE - Add a push button somewhere to force pre-processing menu to synchronize with main UI for + # TODO instrument calibration if reset_pre_process_window: exp_number = int(str(self.ui.lineEdit_exp.text())) # detector size/pixel numbers @@ -3577,16 +3497,23 @@ class MainWindow(QtGui.QMainWindow): :param peak_info: :return: """ - # Check requirements - assert isinstance(peak_info, r4c.PeakProcessRecord) + # Check requirement + assert isinstance(peak_info, r4c.PeakProcessRecord), 'Peak information instance must be a PeakProcessedRecord' \ + 'but not a {0}'.format(type(peak_info)) # Get data exp_number, scan_number = peak_info.get_experiment_info() h, k, l = peak_info.get_hkl(user_hkl=False) q_x, q_y, q_z = peak_info.get_peak_centre() + # wave length m1 = self._myControl.get_sample_log_value(exp_number, scan_number, 1, '_m1') - # TODO/ISSUE/NOW consider user specified - wave_length = hb3a_util.convert_to_wave_length(m1_position=m1) + user_wave_length = self._myControl.get_calibrated_wave_length(exp_number) + if user_wave_length is None: + # no user specified wave length + 
wave_length = hb3a_util.convert_to_wave_length(m1_position=m1) + else: + # user specified is found + wave_length = user_wave_length # Set to table status, err_msg = self.ui.tableWidget_peaksCalUB.add_peak(scan_number, (h, k, l), (q_x, q_y, q_z), m1, diff --git a/scripts/SANS/sans/gui_logic/gui_common.py b/scripts/SANS/sans/gui_logic/gui_common.py index b9ab0fe69cae6efbe66c606472aba7ad6c11fafa..1b97b59e83df5fc3d2b254b76cf8c9349cac9224 100644 --- a/scripts/SANS/sans/gui_logic/gui_common.py +++ b/scripts/SANS/sans/gui_logic/gui_common.py @@ -18,8 +18,9 @@ CAN_TRANSMISSION_PERIOD_INDEX = 9 CAN_DIRECT_INDEX = 10 CAN_DIRECT_PERIOD_INDEX = 11 OUTPUT_NAME_INDEX = 12 -OPTIONS_INDEX = 13 -HIDDEN_OPTIONS_INDEX = 14 +USER_FILE_INDEX = 13 +OPTIONS_INDEX = 14 +HIDDEN_OPTIONS_INDEX = 15 OPTIONS_SEPARATOR = "," OPTIONS_EQUAL = "=" diff --git a/scripts/SANS/sans/gui_logic/models/table_model.py b/scripts/SANS/sans/gui_logic/models/table_model.py index 2c4df2ffcba73c2f9df454e3aad103a4c96e787b..ce7563ccd4b8d521378b5b0650ef8f0163086c82 100644 --- a/scripts/SANS/sans/gui_logic/models/table_model.py +++ b/scripts/SANS/sans/gui_logic/models/table_model.py @@ -35,6 +35,12 @@ class TableModel(object): self._validate_file_name(value) self._user_file = value + def get_row_user_file(self, row_index): + if row_index in self._table_entries: + return self._table_entries[row_index].user_file + else: + raise IndexError("The row {} does not exist.".format(row_index)) + @property def batch_file(self): return self._batch_file @@ -63,7 +69,7 @@ class TableIndexModel(object): can_scatter, can_scatter_period, can_transmission, can_transmission_period, can_direct, can_direct_period, - output_name="", options_column_string=""): + output_name="", user_file="", options_column_string=""): super(TableIndexModel, self).__init__() self.index = index self.sample_scatter = sample_scatter @@ -80,7 +86,7 @@ class TableIndexModel(object): self.can_direct = can_direct self.can_direct_period = can_direct_period - 
self.user_file = "" + self.user_file = user_file self.output_name = output_name # Options column entries diff --git a/scripts/SANS/sans/gui_logic/presenter/run_tab_presenter.py b/scripts/SANS/sans/gui_logic/presenter/run_tab_presenter.py index 92d3a1fc5b4f7e145c32f51fdd4dfac221f63c79..4e5aefe82a7778a61dd66b8c718f1922dbeafe70 100644 --- a/scripts/SANS/sans/gui_logic/presenter/run_tab_presenter.py +++ b/scripts/SANS/sans/gui_logic/presenter/run_tab_presenter.py @@ -30,7 +30,7 @@ from sans.gui_logic.gui_common import (get_reduction_mode_strings_for_gui, CAN_TRANSMISSION_INDEX, CAN_TRANSMISSION_PERIOD_INDEX, CAN_DIRECT_INDEX, CAN_DIRECT_PERIOD_INDEX, OUTPUT_NAME_INDEX, OPTIONS_SEPARATOR, OPTIONS_INDEX, - OPTIONS_EQUAL, HIDDEN_OPTIONS_INDEX) + OPTIONS_EQUAL, HIDDEN_OPTIONS_INDEX, USER_FILE_INDEX) from sans.common.enums import (BatchReductionEntry, OutputMode, SANSInstrument, RangeStepType, SampleShape, FitType) from sans.common.file_information import (SANSFileInformationFactory) from sans.user_file.user_file_reader import UserFileReader @@ -159,7 +159,6 @@ class RunTabPresenter(object): if not user_file_path: return - # 2. Get the full file path user_file_path = FileFinder.getFullPath(user_file_path) if not os.path.exists(user_file_path): @@ -205,7 +204,6 @@ class RunTabPresenter(object): # 2. Read the batch file batch_file_parser = BatchCsvParser(batch_file_path) parsed_rows = batch_file_parser.parse_batch_file() - # 3. 
Clear the table self._view.clear_table() @@ -799,6 +797,7 @@ class RunTabPresenter(object): can_direct = self._view.get_cell(row=row, column=CAN_DIRECT_INDEX, convert_to=str) can_direct_period = self._view.get_cell(row=row, column=CAN_DIRECT_PERIOD_INDEX, convert_to=str) output_name = self._view.get_cell(row=row, column=OUTPUT_NAME_INDEX, convert_to=str) + user_file = self._view.get_cell(row=row, column=USER_FILE_INDEX, convert_to=str) # Get the options string # We don't have to add the hidden column here, since it only contains information for the SANS @@ -819,6 +818,7 @@ class RunTabPresenter(object): can_direct=can_direct, can_direct_period=can_direct_period, output_name=output_name, + user_file = user_file, options_column_string=options_string) table_model.add_table_entry(row, table_index_model) return table_model @@ -843,18 +843,33 @@ class RunTabPresenter(object): for row in rows: self.sans_logger.information("Generating state for row {}".format(row)) if not self.is_empty_row(row): - try: - state = gui_state_director.create_state(row) - states.update({row: state}) - except ValueError as e: - self.sans_logger.error("There was a bad entry for row {}. Ensure that the path to your files has " - "been added to the Mantid search directories! See here for more " - "details: {}".format(row, str(e))) - raise RuntimeError("There was a bad entry for row {}. Ensure that the path to your files has " - "been added to the Mantid search directories! See here for more " - "details: {}".format(row, str(e))) + row_user_file = table_model.get_row_user_file(row) + if row_user_file: + user_file_path = FileFinder.getFullPath(row_user_file) + if not os.path.exists(user_file_path): + raise RuntimeError("The user path {} does not exist. 
Make sure a valid user file path" + " has been specified.".format(user_file_path)) + + user_file_reader = UserFileReader(user_file_path) + user_file_items = user_file_reader.read_user_file() + + row_state_model = StateGuiModel(user_file_items) + row_gui_state_director = GuiStateDirector(table_model, row_state_model, self._facility) + self._create_row_state(row_gui_state_director, states, row) + else: + self._create_row_state(gui_state_director, states, row) return states + def _create_row_state(self, director, states, row): + try: + state = director.create_state(row) + states.update({row: state}) + except ValueError as e: + error_msg = "There was a bad entry for row {}. Ensure that the path to your files has been added to the " \ + "Mantid search directories! See here for more details: {}" + self.sans_logger.error(error_msg.format(row, str(e))) + raise RuntimeError(error_msg.format(row, str(e))) + def _populate_row_in_table(self, row): """ Adds a row to the table diff --git a/scripts/SANS/sans/gui_logic/sans_data_processor_gui_algorithm.py b/scripts/SANS/sans/gui_logic/sans_data_processor_gui_algorithm.py index 18df3a5a8eeb347816a1ac630825d6565466c0b2..41620dadfbc3af2da4778615b1986e17c58cc3c7 100644 --- a/scripts/SANS/sans/gui_logic/sans_data_processor_gui_algorithm.py +++ b/scripts/SANS/sans/gui_logic/sans_data_processor_gui_algorithm.py @@ -153,6 +153,14 @@ def create_properties(): default='', prefix='', property_type=str), + algorithm_list_entry(column_name="User File", + algorithm_property="UserFile", + description=('The user file to use, this will override GUI changes for this row.' 
+ ' If left unspecified default will be used'), + show_value=False, + default="", + prefix='', + property_type=str), algorithm_list_entry(column_name="", algorithm_property="RowIndex", description='The row index (which is automatically populated by the GUI)', diff --git a/scripts/SANS/sans/test_helper/mock_objects.py b/scripts/SANS/sans/test_helper/mock_objects.py index 72a0ecd64e3710af8882d11d02705944a09f2c24..18baed89e6b98ff81c7d3155426b474c7451a447 100644 --- a/scripts/SANS/sans/test_helper/mock_objects.py +++ b/scripts/SANS/sans/test_helper/mock_objects.py @@ -6,6 +6,7 @@ from ui.sans_isis.masking_table import MaskingTable from sans.gui_logic.presenter.run_tab_presenter import RunTabPresenter from sans.common.enums import (RangeStepType, OutputMode) from sans.test_helper.test_director import TestDirector +from functools import (partial) import sys if sys.version_info.major == 3: @@ -26,7 +27,7 @@ def create_mock_masking_table(): return view -def get_cell_mock(row, column, convert_to=None): +def get_cell_mock(row, column, convert_to=None, user_file_path = ""): _ = convert_to # noqa if row == 0: # For the first row we return the @@ -39,6 +40,13 @@ def get_cell_mock(row, column, convert_to=None): return "SANS2D00022048" else: return "" + elif row == 1: + if column == 0: + return "SANS2D00022024" + if column == 13: + return user_file_path + else: + return "" else: # For the other rows, we only return sample scatter if column == 0: @@ -64,10 +72,12 @@ def add_listener_mock(listener): mock_listener_list.append(listener) -def create_mock_view(user_file_path, batch_file_path=None): +def create_mock_view(user_file_path, batch_file_path=None, row_user_file_path = ""): + get_cell_mock_with_path = partial(get_cell_mock, user_file_path = row_user_file_path) + view = mock.create_autospec(SANSDataProcessorGui, spec_set=False) view.get_user_file_path = mock.Mock(return_value=user_file_path) - view.get_cell = mock.MagicMock(side_effect=get_cell_mock) + view.get_cell = 
mock.MagicMock(side_effect=get_cell_mock_with_path) view.get_batch_file_path = mock.MagicMock(return_value=batch_file_path) view.get_number_of_rows = mock.MagicMock(return_value=2) diff --git a/scripts/SANS/sans/test_helper/user_file_test_helper.py b/scripts/SANS/sans/test_helper/user_file_test_helper.py index 6758b99a076209a00d2dd4c3cdeee559960a7b6a..cbc4433c43b5ba2386572c83e3e8a763d5b61552 100644 --- a/scripts/SANS/sans/test_helper/user_file_test_helper.py +++ b/scripts/SANS/sans/test_helper/user_file_test_helper.py @@ -2,80 +2,80 @@ from __future__ import (absolute_import, division, print_function) import tempfile -sample_user_file = ("PRINT for changer\n" - "MASK/CLEAR \n" - "MASK/CLEAR/TIME\n" - "L/WAV 1.5 12.5 0.125/LIN\n" - "L/Q .001,.001, .0126, -.08, .2\n" - "!L/Q .001 .8 .08/log\n" - "L/QXY 0 0.05 .001/lin\n" - "BACK/M1 35000 65000\n" - "BACK/M2 85000 98000\n" - "BACK/MON/TIMES 3500 4500\n" - "BACK/TRANS 123 466\n" - "DET/REAR\n" - "GRAVITY/ON\n" - "!FIT/TRANS/OFF\n" - "FIT/TRANS/LOG 1.5 12.5\n" - "FIT/MONITOR 1000 2000\n" - "mask/rear h0\n" - "mask/rear h190>h191\n" - "mask/rear h167>h172\n" - "mask/rear v0\n" - "mask/rear v191\n" - "mask/front h0\n" - "mask/front h190>h191\n" - "mask/front v0\n" - "mask/front v191\n" - "! dead wire near top\n" - "mask/front h156>h159\n" - "!masking off beamstop arm - 12mm wide @ 19degrees\n" - "!mask/rear/line 12 19\n" - "! spot on rhs beam stop at 11m\n" - "! mask h57>h66+v134>v141\n" - "!\n" - "! mask for Bragg at 12m, 26/03/11, 3 time channels\n" - "mask/time 17500 22000\n" - "!\n" - "L/R 12 15\n" - "L/Q/RCut 200\n" - "L/Q/WCut 8.0\n" - "!PRINT REMOVED RCut=200 WCut=8\n" - "!\n" - "MON/DIRECT=DIRECTM1_15785_12m_31Oct12_v12.dat\n" - "MON/TRANS/SPECTRUM=1/INTERPOLATE\n" - "MON/SPECTRUM=1/INTERPOLATE\n" - "!TRANS/TRANSPEC=3\n" - "TRANS/TRANSPEC=4/SHIFT=-70\n" - "TRANS/RADIUS=7.0\n" - "TRANS/ROI=test.xml, test2.xml\n" - "TRANS/MASK=test3.xml, test4.xml\n" - "!\n" - "set centre 155.45 -169.6\n" - "!\n" - "! 
25/10/13 centre gc 22021, fit gdw20 22023\n" - "set scales 0.074 1.0 1.0 1.0 1.0\n" - "! correction to actual sample position, notionally 81mm before shutter\n" - "SAMPLE/OFFSET +53.0\n" - "! Correction to SANS2D encoders in mm\n" - "DET/CORR/REAR/X -16.0\n" - "DET/CORR/REAR/Z 47.0\n" - "DET/CORR/FRONT/X -44.0\n" - "DET/CORR/FRONT/Y -20.0\n" - "DET/CORR/FRONT/Z 47.0\n" - "DET/CORR/FRONT/ROT 0.0\n" - "!\n" - "!! 01/10/13 MASKSANS2d_133F M3 by M1 trans Hellsing, Rennie, Jackson, L1=L2=12m A1=20 and A2=8mm\n" - "L/EVENTSTIME 7000.0,500.0,60000.0\n" - "SAMPLE/PATH/ON\n" - "QRESOL/ON \n" - "QRESOL/DELTAR=11 \n" - "QRESOL/LCOLLIM=12 \n" - "QRESOL/MODERATOR=moderator_rkh_file.txt\n" - "QRESOL/A1=13\n" - "QRESOL/A2=14\n" - "TUBECALIBFILE=TUBE_SANS2D_BOTH_31681_25Sept15.nxs" - ) +base_user_file = ("PRINT for changer\n" + "MASK/CLEAR \n" + "MASK/CLEAR/TIME\n" + "L/WAV 1.5 12.5 0.125/LIN\n" + "L/Q .001,.001, .0126, -.08, .2\n" + "!L/Q .001 .8 .08/log\n" + "L/QXY 0 0.05 .001/lin\n" + "BACK/M1 35000 65000\n" + "BACK/M2 85000 98000\n" + "BACK/MON/TIMES 3500 4500\n" + "BACK/TRANS 123 466\n" + "DET/REAR\n" + "GRAVITY/{}\n" + "!FIT/TRANS/OFF\n" + "FIT/TRANS/LOG 1.5 12.5\n" + "FIT/MONITOR 1000 2000\n" + "mask/rear h0\n" + "mask/rear h190>h191\n" + "mask/rear h167>h172\n" + "mask/rear v0\n" + "mask/rear v191\n" + "mask/front h0\n" + "mask/front h190>h191\n" + "mask/front v0\n" + "mask/front v191\n" + "! dead wire near top\n" + "mask/front h156>h159\n" + "!masking off beamstop arm - 12mm wide @ 19degrees\n" + "!mask/rear/line 12 19\n" + "! spot on rhs beam stop at 11m\n" + "! mask h57>h66+v134>v141\n" + "!\n" + "! 
mask for Bragg at 12m, 26/03/11, 3 time channels\n" + "mask/time 17500 22000\n" + "!\n" + "L/R 12 15\n" + "L/Q/RCut 200\n" + "L/Q/WCut 8.0\n" + "!PRINT REMOVED RCut=200 WCut=8\n" + "!\n" + "MON/DIRECT=DIRECTM1_15785_12m_31Oct12_v12.dat\n" + "MON/TRANS/SPECTRUM=1/INTERPOLATE\n" + "MON/SPECTRUM=1/INTERPOLATE\n" + "!TRANS/TRANSPEC=3\n" + "TRANS/TRANSPEC=4/SHIFT=-70\n" + "TRANS/RADIUS=7.0\n" + "TRANS/ROI=test.xml, test2.xml\n" + "TRANS/MASK=test3.xml, test4.xml\n" + "!\n" + "set centre 155.45 -169.6\n" + "!\n" + "! 25/10/13 centre gc 22021, fit gdw20 22023\n" + "set scales 0.074 1.0 1.0 1.0 1.0\n" + "! correction to actual sample position, notionally 81mm before shutter\n" + "SAMPLE/OFFSET +53.0\n" + "! Correction to SANS2D encoders in mm\n" + "DET/CORR/REAR/X -16.0\n" + "DET/CORR/REAR/Z 47.0\n" + "DET/CORR/FRONT/X -44.0\n" + "DET/CORR/FRONT/Y -20.0\n" + "DET/CORR/FRONT/Z 47.0\n" + "DET/CORR/FRONT/ROT 0.0\n" + "!\n" + "!! 01/10/13 MASKSANS2d_133F M3 by M1 trans Hellsing, Rennie, Jackson, L1=L2=12m A1=20 and A2=8mm\n" + "L/EVENTSTIME 7000.0,500.0,60000.0\n" + "SAMPLE/PATH/ON\n" + "QRESOL/ON \n" + "QRESOL/DELTAR=11 \n" + "QRESOL/LCOLLIM=12 \n" + "QRESOL/MODERATOR=moderator_rkh_file.txt\n" + "QRESOL/A1=13\n" + "QRESOL/A2=14\n" + "TUBECALIBFILE=TUBE_SANS2D_BOTH_31681_25Sept15.nxs" + ) def create_user_file(user_file_content): @@ -84,3 +84,11 @@ def create_user_file(user_file_content): user_file_path = temp.name temp.close() return user_file_path + + +def make_sample_user_file(gravity ='ON'): + return base_user_file.format(gravity) + + +sample_user_file = make_sample_user_file(gravity ='ON') +sample_user_file_gravity_OFF = make_sample_user_file(gravity ='OFF') diff --git a/scripts/test/CrystalFieldMultiSiteTest.py b/scripts/test/CrystalFieldMultiSiteTest.py index 8a0e085df566692906083615fe4686ff86fa12cc..96b7af7b73e404343ff8a3d1e8337ca482e174c7 100644 --- a/scripts/test/CrystalFieldMultiSiteTest.py +++ b/scripts/test/CrystalFieldMultiSiteTest.py @@ -1,5 +1,6 @@ import numpy 
as np import unittest + from CrystalField.CrystalFieldMultiSite import CrystalFieldMultiSite c_mbsr = 79.5774715459 # Conversion from barn to mb/sr @@ -235,7 +236,7 @@ class CrystalFieldMultiSiteTests(unittest.TestCase): cf = CrystalFieldMultiSite(Ions=['Ce', 'Pr'], Symmetries=['C2v', 'C2v'], Temperatures=[44.0], FWHMs=[1.1], ToleranceIntensity=6.0, ToleranceEnergy=1.0, FixAllPeaks=True, parameters=params) - cf.fix('ion0.BmolX', 'ion0.BmolY', 'ion0.BmolZ', 'ion0.BextX', 'ion0.BextY', 'ion0.BextZ', 'ion0.B40', + cf.fix('ion0.BmolX', 'ion0.BmolY', 'ion0.BmolZ', 'ion0.BextX', 'ion0.BextY', 'ion0.BextZ', 'ion0.B40', 'ion0.B42', 'ion0.B44', 'ion0.B60', 'ion0.B62', 'ion0.B64', 'ion0.B66', 'ion0.IntensityScaling', 'ion1.BmolX', 'ion1.BmolY', 'ion1.BmolZ', 'ion1.BextX', 'ion1.BextY', 'ion1.BextZ', 'ion1.B40', 'ion1.B42', 'ion1.B44', 'ion1.B60', 'ion1.B62', 'ion1.B64', 'ion1.B66', 'ion1.IntensityScaling') @@ -591,4 +592,4 @@ class CrystalFieldMultiSiteTests(unittest.TestCase): s = str(cf.function) self.assertTrue('ion0.IntensityScaling=0.2*ion2.IntensityScaling' in s) self.assertTrue('ion1.IntensityScaling=0.8*ion2.IntensityScaling' in s) - self.assertTrue('ion3.IntensityScaling=0.1*ion2.IntensityScaling' in s) \ No newline at end of file + self.assertTrue('ion3.IntensityScaling=0.1*ion2.IntensityScaling' in s) diff --git a/scripts/test/Muon/transformPresenter_test.py b/scripts/test/Muon/transformPresenter_test.py index fa51ed4cee96c7637ca79a936e30d40cf9a498a6..b184745a590a14e7b0f443af3332b3e371b93207 100644 --- a/scripts/test/Muon/transformPresenter_test.py +++ b/scripts/test/Muon/transformPresenter_test.py @@ -1,5 +1,6 @@ import sys +import mantid #noqa from Muon import fft_presenter from Muon import load_utils from Muon import transform_presenter diff --git a/scripts/test/SANS/gui_logic/gui_state_director_test.py b/scripts/test/SANS/gui_logic/gui_state_director_test.py index 138f78caaca1a4af4d622240bf1021e9d381661e..73f5a62dc018f946bb1684d7088fcb3d975e46c7 100644 --- 
a/scripts/test/SANS/gui_logic/gui_state_director_test.py +++ b/scripts/test/SANS/gui_logic/gui_state_director_test.py @@ -3,8 +3,6 @@ from __future__ import (absolute_import, division, print_function) import unittest import os -import mantid - from sans.gui_logic.presenter.gui_state_director import GuiStateDirector from sans.gui_logic.models.table_model import (TableModel, TableIndexModel) from sans.gui_logic.models.state_gui_model import StateGuiModel @@ -18,7 +16,7 @@ class GuiStateDirectorTest(unittest.TestCase): @staticmethod def _get_table_model(option_string=""): table_index_model = TableIndexModel(0, "SANS2D00022024", "", "", "", "", "", "", "", "", - "", "", "", "", option_string) + "", "", "", "", "", option_string) table_model = TableModel() table_model.add_table_entry(0, table_index_model) return table_model @@ -69,5 +67,3 @@ class GuiStateDirectorTest(unittest.TestCase): if __name__ == '__main__': unittest.main() - - diff --git a/scripts/test/SANS/gui_logic/main_presenter_test.py b/scripts/test/SANS/gui_logic/main_presenter_test.py index f3e0e7aaf4e9a12b3a6e8183dc3301ff2b07788e..06f6fa8701a47d022495285ca92b99fd0b8c08f5 100644 --- a/scripts/test/SANS/gui_logic/main_presenter_test.py +++ b/scripts/test/SANS/gui_logic/main_presenter_test.py @@ -21,7 +21,7 @@ class MainPresenterTest(unittest.TestCase): presenter = MainPresenter(SANSFacility.ISIS) self.assertTrue(presenter.get_number_of_white_list_items() == 0) white_list = presenter.get_white_list() - self.assertTrue(presenter.get_number_of_white_list_items() == 16) + self.assertTrue(presenter.get_number_of_white_list_items() == 17) self.assertTrue(white_list[0].algorithm_property == "SampleScatter") self.assertTrue(white_list[1].algorithm_property == "SampleScatterPeriod") self.assertTrue(white_list[2].algorithm_property == "SampleTransmission") @@ -36,15 +36,16 @@ class MainPresenterTest(unittest.TestCase): self.assertTrue(white_list[11].algorithm_property == "CanDirectPeriod") 
self.assertTrue(white_list[12].algorithm_property == "UseOptimizations") self.assertTrue(white_list[13].algorithm_property == "OutputName") - self.assertTrue(white_list[14].algorithm_property == "RowIndex") - self.assertTrue(white_list[15].algorithm_property == "OutputMode") + self.assertTrue(white_list[14].algorithm_property == "UserFile") + self.assertTrue(white_list[15].algorithm_property == "RowIndex") + self.assertTrue(white_list[16].algorithm_property == "OutputMode") def test_that_black_list_is_correct(self): presenter = MainPresenter(SANSFacility.ISIS) expected = "InputWorkspace,OutputWorkspace,SampleScatter,SampleScatterPeriod,SampleTransmission," \ "SampleTransmissionPeriod,SampleDirect,SampleDirectPeriod,CanScatter,CanScatterPeriod," \ "CanTransmission,CanTransmissionPeriod,CanDirect,CanDirectPeriod," \ - "UseOptimizations,OutputName,RowIndex,OutputMode," + "UseOptimizations,OutputName,UserFile,RowIndex,OutputMode," self.assertTrue(expected == presenter.get_black_list()) def test_that_gets_pre_processing_options_are_valid_and_other_options_are_empty(self): diff --git a/scripts/test/SANS/gui_logic/run_tab_presenter_test.py b/scripts/test/SANS/gui_logic/run_tab_presenter_test.py index e3d7f27c2b53c2c5622480ef64d0126403bca0ab..cd3eda088e6f537f426f44dc2aa325b17e23711b 100644 --- a/scripts/test/SANS/gui_logic/run_tab_presenter_test.py +++ b/scripts/test/SANS/gui_logic/run_tab_presenter_test.py @@ -10,7 +10,7 @@ from mantid.kernel import PropertyManagerDataService from sans.gui_logic.presenter.run_tab_presenter import RunTabPresenter from sans.common.enums import (SANSFacility, ReductionDimensionality, SaveType, ISISReductionMode, RangeStepType, FitType) -from sans.test_helper.user_file_test_helper import (create_user_file, sample_user_file) +from sans.test_helper.user_file_test_helper import (create_user_file, sample_user_file, sample_user_file_gravity_OFF) from sans.test_helper.mock_objects import (create_mock_view) from sans.test_helper.common import 
(remove_file, save_to_csv) @@ -105,7 +105,7 @@ class RunTabPresenterTest(unittest.TestCase): # Assert certain function calls self.assertTrue(view.get_user_file_path.call_count == 3) self.assertTrue(view.get_batch_file_path.call_count == 2) # called twice for the sub presenter updates (masking table and settings diagnostic tab) # noqa - self.assertTrue(view.get_cell.call_count == 60) + self.assertTrue(view.get_cell.call_count == 64) self.assertTrue(view.get_number_of_rows.call_count == 6) @@ -202,6 +202,7 @@ class RunTabPresenterTest(unittest.TestCase): # Check some entries self.assertTrue(state0.slice.start_time is None) self.assertTrue(state0.slice.end_time is None) + self.assertTrue(state0.reduction.reduction_dimensionality is ReductionDimensionality.OneDim) # Clean up @@ -228,6 +229,22 @@ class RunTabPresenterTest(unittest.TestCase): # Clean up self._remove_files(user_file_path=user_file_path, batch_file_path=batch_file_path) + def test_that_can_get_states_from_row_user_file(self): + # Arrange + row_user_file_path = create_user_file(sample_user_file_gravity_OFF) + batch_file_path, user_file_path, presenter, _ = self._get_files_and_mock_presenter(BATCH_FILE_TEST_CONTENT_2, row_user_file_path) + + presenter.on_user_file_load() + presenter.on_batch_file_load() + + # Act + state = presenter.get_state_for_row(1) + state0 = presenter.get_state_for_row(0) + + # Assert + self.assertTrue(state.convert_to_q.use_gravity is False) + self.assertTrue(state0.convert_to_q.use_gravity is True) + def test_that_returns_none_when_index_does_not_exist(self): # Arrange batch_file_path = save_to_csv(BATCH_FILE_TEST_CONTENT_2) @@ -358,10 +375,10 @@ class RunTabPresenterTest(unittest.TestCase): PropertyManagerDataService.remove(element) @staticmethod - def _get_files_and_mock_presenter(content): + def _get_files_and_mock_presenter(content, row_user_file_path = ""): batch_file_path = save_to_csv(content) user_file_path = create_user_file(sample_user_file) - view, _, _ = 
create_mock_view(user_file_path, batch_file_path) + view, _, _ = create_mock_view(user_file_path, batch_file_path, row_user_file_path) # We just use the sample_user_file since it exists. view.get_mask_file = mock.MagicMock(return_value=user_file_path) presenter = RunTabPresenter(SANSFacility.ISIS) diff --git a/scripts/test/SANS/gui_logic/table_model_test.py b/scripts/test/SANS/gui_logic/table_model_test.py index 232b497917111305f0b2e93bb444f2ba9444a5d7..739b2aaaa733098e84f66bac0629c0f2b273aea0 100644 --- a/scripts/test/SANS/gui_logic/table_model_test.py +++ b/scripts/test/SANS/gui_logic/table_model_test.py @@ -2,8 +2,6 @@ from __future__ import (absolute_import, division, print_function) import unittest -import mantid - from sans.gui_logic.models.table_model import (TableModel, TableIndexModel) @@ -31,7 +29,7 @@ class TableModelTest(unittest.TestCase): def test_that_can_set_the_options_column_model(self): table_index_model = TableIndexModel(0, "", "", "", "", "", "", - "", "", "", "", "", "", "", + "", "", "", "", "", "", "", "", "WavelengthMin=1, WavelengthMax=3, NotRegister2=1") options_column_model = table_index_model.options_column_model options = options_column_model.get_options() @@ -40,10 +38,23 @@ class TableModelTest(unittest.TestCase): self.assertTrue(options["WavelengthMax"] == 3.) 
def test_that_raises_for_missing_equal(self): - args = [0, "", "", "", "", "", "", "", "", "", "", "", "", "", + args = [0, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "WavelengthMin=1, WavelengthMax=3, NotRegister2"] self.assertRaises(ValueError, TableIndexModel, *args) + def test_that_querying_nonexistent_row_index_raises_IndexError_exception(self): + table_model = TableModel() + args = [0] + self.assertRaises(IndexError, table_model.get_row_user_file, *args) + + def test_that_can_retrieve_user_file_from_table_index_model(self): + table_model = TableModel() + table_index_model = TableIndexModel(2, "", "", "", "", "", "", + "", "", "", "", "", "", "", "User_file_name") + table_model.add_table_entry(2, table_index_model) + user_file = table_model.get_row_user_file(2) + self.assertEqual(user_file,"User_file_name") + def _do_test_file_setting(self, func, prop): # Test that can set to empty string table_model = TableModel() @@ -71,5 +82,6 @@ class TableModelTest(unittest.TestCase): table_model = TableModel() table_model.user_file = value + if __name__ == '__main__': unittest.main()