diff --git a/Framework/Algorithms/CMakeLists.txt b/Framework/Algorithms/CMakeLists.txt index 58a3a316cf1a47a670ab030d027eefee315fafe7..8cffdf1cb499559851ec004d9e8ba9cadf15248b 100644 --- a/Framework/Algorithms/CMakeLists.txt +++ b/Framework/Algorithms/CMakeLists.txt @@ -194,6 +194,7 @@ set ( SRC_FILES src/ModeratorTzeroLinear.cpp src/MonitorEfficiencyCorUser.cpp src/MonteCarloAbsorption.cpp + src/MostLikelyMean.cpp src/MultipleScatteringCylinderAbsorption.cpp src/Multiply.cpp src/MultiplyRange.cpp @@ -527,6 +528,7 @@ set ( INC_FILES inc/MantidAlgorithms/ModeratorTzeroLinear.h inc/MantidAlgorithms/MonitorEfficiencyCorUser.h inc/MantidAlgorithms/MonteCarloAbsorption.h + inc/MantidAlgorithms/MostLikelyMean.h inc/MantidAlgorithms/MultipleScatteringCylinderAbsorption.h inc/MantidAlgorithms/Multiply.h inc/MantidAlgorithms/MultiplyRange.h @@ -864,6 +866,7 @@ set ( TEST_FILES ModeratorTzeroTest.h MonitorEfficiencyCorUserTest.h MonteCarloAbsorptionTest.h + MostLikelyMeanTest.h MultipleScatteringCylinderAbsorptionTest.h MultiplyRangeTest.h MultiplyTest.h diff --git a/Framework/Algorithms/inc/MantidAlgorithms/MostLikelyMean.h b/Framework/Algorithms/inc/MantidAlgorithms/MostLikelyMean.h new file mode 100644 index 0000000000000000000000000000000000000000..f673f021c8179e5e42628164f1829598e3ebc9de --- /dev/null +++ b/Framework/Algorithms/inc/MantidAlgorithms/MostLikelyMean.h @@ -0,0 +1,49 @@ +#ifndef MANTID_ALGORITHMS_MOSTLIKELYMEAN_H_ +#define MANTID_ALGORITHMS_MOSTLIKELYMEAN_H_ + +#include "MantidAPI/Algorithm.h" +#include "MantidAlgorithms/DllConfig.h" + +namespace Mantid { +namespace Algorithms { + +/** MostLikelyMean : Computes the most likely mean of the array by + * minimizing the taxicab distance of the elements from the rest. + + Copyright © 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge + National Laboratory & European Spallation Source + + This file is part of Mantid. + + Mantid is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, or + (at your option) any later version. + + Mantid is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + File change history is stored at: <https://github.com/mantidproject/mantid> + Code Documentation is available at: <http://doxygen.mantidproject.org> +*/ +class MANTID_ALGORITHMS_DLL MostLikelyMean : public API::Algorithm { +public: + const std::string name() const override; + int version() const override; + const std::string category() const override; + const std::string summary() const override; + +private: + void init() override; + void exec() override; +}; + +} // namespace Algorithms +} // namespace Mantid + +#endif /* MANTID_ALGORITHMS_MOSTLIKELYMEAN_H_ */ diff --git a/Framework/Algorithms/src/MostLikelyMean.cpp b/Framework/Algorithms/src/MostLikelyMean.cpp new file mode 100644 index 0000000000000000000000000000000000000000..31228b7d2072fdbececc54e7f57532bdd00c4e26 --- /dev/null +++ b/Framework/Algorithms/src/MostLikelyMean.cpp @@ -0,0 +1,73 @@ +#include "MantidAlgorithms/MostLikelyMean.h" +#include "MantidKernel/ArrayProperty.h" +#include "MantidKernel/NullValidator.h" +#include "MantidKernel/PropertyWithValue.h" + +#include "boost/multi_array.hpp" + +namespace Mantid { +namespace Algorithms { + +using Mantid::Kernel::Direction; +using Mantid::Kernel::ArrayProperty; +using Mantid::Kernel::NullValidator; +using Mantid::Kernel::PropertyWithValue; + +// Register the algorithm into the AlgorithmFactory +DECLARE_ALGORITHM(MostLikelyMean) + +//---------------------------------------------------------------------------------------------- + +/// Algorithms name for identification. @see Algorithm::name +const std::string MostLikelyMean::name() const { return "MostLikelyMean"; } + +/// Algorithm's version for identification. @see Algorithm::version +int MostLikelyMean::version() const { return 1; } + +/// Algorithm's category for identification. @see Algorithm::category +const std::string MostLikelyMean::category() const { return "Arithmetic"; } + +/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary +const std::string MostLikelyMean::summary() const { + return "Computes the most likely mean of the array by minimizing the taxicab " + "distance of the elements from the rest."; +} + +//---------------------------------------------------------------------------------------------- +/** Initialize the algorithm's properties. + */ +void MostLikelyMean::init() { + auto nullValidator = boost::make_shared<NullValidator>(); + declareProperty(Kernel::make_unique<ArrayProperty<double>>( + "InputArray", nullValidator, Direction::Input), + "An input array."); + declareProperty(Kernel::make_unique<PropertyWithValue<double>>( + "Output", 0., Direction::Output), + "The output (mean)."); +} + +//---------------------------------------------------------------------------------------------- +/** Execute the algorithm. 
+ */ +void MostLikelyMean::exec() { + const std::vector<double> input = getProperty("InputArray"); + const int size = static_cast<int>(input.size()); + boost::multi_array<double, 2> cov(boost::extents[size][size]); + PARALLEL_FOR_IF(true) + for (int i = 0; i < size; ++i) { + for (int j = 0; j <= i; ++j) { + double diff = sqrt(fabs(input[i] - input[j])); + cov[i][j] = diff; + cov[j][i] = diff; + } + } + std::vector<double> sums(size); + for (int i = 0; i < size; ++i) { + sums[i] = std::accumulate(cov[i].begin(), cov[i].end(), 0.); + } + const auto minIndex = std::min_element(sums.cbegin(), sums.cend()); + setProperty("Output", input[std::distance(sums.cbegin(), minIndex)]); +} + +} // namespace Algorithms +} // namespace Mantid diff --git a/Framework/Algorithms/test/MostLikelyMeanTest.h b/Framework/Algorithms/test/MostLikelyMeanTest.h new file mode 100644 index 0000000000000000000000000000000000000000..32d08d5c3a4da0e211b9846e0b337d58abe49f5d --- /dev/null +++ b/Framework/Algorithms/test/MostLikelyMeanTest.h @@ -0,0 +1,58 @@ +#ifndef MANTID_ALGORITHMS_MOSTLIKELYMEANTEST_H_ +#define MANTID_ALGORITHMS_MOSTLIKELYMEANTEST_H_ + +#include <cxxtest/TestSuite.h> + +#include "MantidAlgorithms/MostLikelyMean.h" + +using Mantid::Algorithms::MostLikelyMean; + +class MostLikelyMeanTest : public CxxTest::TestSuite { +public: + // This pair of boilerplate methods prevent the suite being created statically + // This means the constructor isn't called when running other tests + static MostLikelyMeanTest *createSuite() { return new MostLikelyMeanTest(); } + static void destroySuite(MostLikelyMeanTest *suite) { delete suite; } + + void test_Init() { + MostLikelyMean alg; + TS_ASSERT_THROWS_NOTHING(alg.initialize()) + TS_ASSERT(alg.isInitialized()) + } + + void test_exec() { + MostLikelyMean alg; + std::vector<double> input = {1, 2, 3, 4, 5}; + alg.initialize(); + alg.setProperty("InputArray", input); + alg.execute(); + const double mean = alg.getProperty("Output"); + TS_ASSERT_EQUALS(mean, 3); + } +}; + +class MostLikelyMeanTestPerformance : public CxxTest::TestSuite { +public: + static MostLikelyMeanTestPerformance *createSuite() { + return new MostLikelyMeanTestPerformance(); + } + static void destroySuite(MostLikelyMeanTestPerformance *suite) { + delete suite; + } + + void setUp() override { + std::vector<double> input(10000); + for (size_t i = 0; i < input.size(); ++i) { + input[i] = double(sqrt(static_cast<double>(i))); + } + m_alg.initialize(); + m_alg.setProperty("InputArray", input); + } + + void test_performance() { m_alg.execute(); } + +private: + MostLikelyMean m_alg; +}; + +#endif /* MANTID_ALGORITHMS_MOSTLIKELYMEANTEST_H_ */ diff --git a/Framework/CurveFitting/src/Algorithms/SplineInterpolation.cpp b/Framework/CurveFitting/src/Algorithms/SplineInterpolation.cpp index 6a3029a7a449402b7b1f148a4a9485d0a13e9ce9..032f4748d1a0764e4f556317605b300211e91748 100644 --- a/Framework/CurveFitting/src/Algorithms/SplineInterpolation.cpp +++ b/Framework/CurveFitting/src/Algorithms/SplineInterpolation.cpp @@ -222,8 +222,7 @@ void SplineInterpolation::exec() { } } // Store the output workspaces - std::string derivWsName = getPropertyValue("OutputWorkspaceDeriv"); - if (order > 0 && !derivWsName.empty()) { + if (order > 0 && !isDefault("OutputWorkspaceDeriv")) { // Store derivatives in a grouped workspace WorkspaceGroup_sptr wsg = WorkspaceGroup_sptr(new WorkspaceGroup); for (size_t i = 0; i < histNo; ++i) { diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadILLDiffraction.h 
b/Framework/DataHandling/inc/MantidDataHandling/LoadILLDiffraction.h index 2fc9f669e7fd7a2177989997d8085813427bef42..0d7413970515722ca6b1feafabea924355b8a2e0 100644 --- a/Framework/DataHandling/inc/MantidDataHandling/LoadILLDiffraction.h +++ b/Framework/DataHandling/inc/MantidDataHandling/LoadILLDiffraction.h @@ -67,37 +67,44 @@ private: std::map<std::string, std::string> validateInputs() override; void exec() override; + void calculateRelativeRotations(std::vector<double> &instrumentAngles, + const Kernel::V3D &tube1Position); + + void fillDataScanMetaData(const NeXus::NXDouble &); + void fillMovingInstrumentScan(const NeXus::NXUInt &, const NeXus::NXDouble &); + void fillStaticInstrumentScan(const NeXus::NXUInt &, const NeXus::NXDouble &, + const NeXus::NXFloat &); + std::vector<Types::Core::DateAndTime> getAbsoluteTimes(const NeXus::NXDouble &) const; std::vector<double> getAxis(const NeXus::NXDouble &) const; std::vector<double> getDurations(const NeXus::NXDouble &) const; std::vector<double> getMonitor(const NeXus::NXDouble &) const; std::string getInstrumentFilePath(const std::string &) const; + Kernel::V3D getReferenceComponentPosition( + const API::MatrixWorkspace_sptr &instrumentWorkspace); bool containsCalibratedData(const std::string &filename) const; - void fillDataScanMetaData(const NeXus::NXDouble &); std::vector<double> getScannedVaribleByPropertyName(const NeXus::NXDouble &scan, const std::string &propertyName) const; - void fillMovingInstrumentScan(const NeXus::NXUInt &, const NeXus::NXDouble &); - void fillStaticInstrumentScan(const NeXus::NXUInt &, const NeXus::NXDouble &, - const NeXus::NXFloat &); void initStaticWorkspace(); void initMovingWorkspace(const NeXus::NXDouble &scan); - Kernel::V3D getReferenceComponentPosition( - const API::MatrixWorkspace_sptr &instrumentWorkspace); - void calculateRelativeRotations(std::vector<double> &instrumentAngles, - const Kernel::V3D &tube1Position); + void loadDataScan(); + API::MatrixWorkspace_sptr loadEmptyInstrument(); void loadMetaData(); void loadScanVars(); void loadStaticInstrument(); - API::MatrixWorkspace_sptr loadEmptyInstrument(); void moveTwoThetaZero(double); void resolveInstrument(); void resolveScanType(); + void setSampleLogs(); + void computeThetaOffset(); + double + m_offsetTheta; ///< the 2theta offset for D20 to account for dead pixels size_t m_sizeDim1; ///< size of dim1, number of tubes (D2B) or the whole /// detector (D20) size_t m_sizeDim2; ///< size of dim2, number of pixels (1 for D20!) 
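For illustration, the MostLikelyMean algorithm added above builds the matrix of sqrt(|a_i - a_j|) distances and returns the input element whose summed distance to the rest is smallest. A minimal NumPy sketch of the same selection rule (illustration only, not part of the patch; the helper name most_likely_mean is hypothetical):

import numpy as np

def most_likely_mean(values):
    # distance matrix as in MostLikelyMean::exec: sqrt(|a_i - a_j|)
    a = np.asarray(values, dtype=float)
    dist = np.sqrt(np.abs(a[:, None] - a[None, :]))
    # pick the element with the smallest summed distance to all others
    return a[np.argmin(dist.sum(axis=1))]

print(most_likely_mean([1, 2, 3, 4, 5]))  # prints 3.0, matching MostLikelyMeanTest::test_exec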
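The m_offsetTheta member declared in the header above is filled by computeThetaOffset() in LoadILLDiffraction.cpp below; a small sketch of that formula, assuming the D20 constants defined in the .cpp (D20_NUMBER_DEAD_PIXELS = 32, D20_PIXEL_SIZE = 0.1 degrees):

D20_NUMBER_DEAD_PIXELS = 32
D20_PIXEL_SIZE = 0.1  # degrees, width of a low-resolution pixel

def theta_offset(resolution_mode):
    # dead-pixel band minus half a pixel of the selected resolution mode
    return D20_NUMBER_DEAD_PIXELS * D20_PIXEL_SIZE - D20_PIXEL_SIZE / (resolution_mode * 2)

for mode in (1, 2, 3):
    print(mode, theta_offset(mode))  # 3.15, 3.175, ~3.183 degrees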
diff --git a/Framework/DataHandling/src/LoadILLDiffraction.cpp b/Framework/DataHandling/src/LoadILLDiffraction.cpp index f175dd10ddc4f99874491c4dc6fdc9f28d7d04e4..0921cf4c7f71484e52fc822a4bdba99eee163e34 100644 --- a/Framework/DataHandling/src/LoadILLDiffraction.cpp +++ b/Framework/DataHandling/src/LoadILLDiffraction.cpp @@ -1,6 +1,4 @@ #include "MantidDataHandling/LoadILLDiffraction.h" -#include "MantidGeometry/Instrument/ComponentInfo.h" -#include "MantidGeometry/Instrument/DetectorInfo.h" #include "MantidAPI/FileProperty.h" #include "MantidAPI/MatrixWorkspace.h" #include "MantidAPI/RegisterFileLoader.h" @@ -8,19 +6,21 @@ #include "MantidDataHandling/H5Util.h" #include "MantidDataObjects/ScanningWorkspaceBuilder.h" #include "MantidGeometry/Instrument/ComponentHelper.h" +#include "MantidGeometry/Instrument/ComponentInfo.h" +#include "MantidGeometry/Instrument/DetectorInfo.h" #include "MantidKernel/ConfigService.h" #include "MantidKernel/DateAndTime.h" #include "MantidKernel/ListValidator.h" #include "MantidKernel/OptionalBool.h" +#include "MantidKernel/PropertyWithValue.h" #include "MantidKernel/TimeSeriesProperty.h" #include "MantidKernel/make_unique.h" -#include <boost/algorithm/string/predicate.hpp> -#include <numeric> - #include <H5Cpp.h> -#include <nexus/napi.h> #include <Poco/Path.h> +#include <boost/algorithm/string.hpp> +#include <nexus/napi.h> +#include <numeric> namespace Mantid { namespace DataHandling { @@ -43,7 +43,10 @@ constexpr size_t D20_NUMBER_DEAD_PIXELS = 32; constexpr size_t NUMBER_MONITORS = 1; // This is the angular size of a pixel in degrees (in low resolution mode) constexpr double D20_PIXEL_SIZE = 0.1; -constexpr double rad2deg = 180. / M_PI; +// The conversion factor from radian to degree +constexpr double RAD_TO_DEG = 180. / M_PI; +// A factor to compute E from lambda: E (meV) = WAVE_TO_E / lambda(A)^2 +constexpr double WAVE_TO_E = 81.8; } // Register the algorithm into the AlgorithmFactory @@ -117,7 +120,7 @@ std::map<std::string, std::string> LoadILLDiffraction::validateInputs() { */ void LoadILLDiffraction::exec() { - Progress progress(this, 0, 1, 3); + Progress progress(this, 0, 1, 4); m_filename = getPropertyValue("Filename"); @@ -131,6 +134,9 @@ void LoadILLDiffraction::exec() { progress.report("Loading the metadata"); loadMetaData(); + progress.report("Setting additional sample logs"); + setSampleLogs(); + setProperty("OutputWorkspace", m_outWorkspace); } @@ -194,8 +200,8 @@ void LoadILLDiffraction::loadDataScan() { } resolveScanType(); - resolveInstrument(); + computeThetaOffset(); if (m_scanType == DetectorScan) { initMovingWorkspace(scan); @@ -338,10 +344,10 @@ void LoadILLDiffraction::calculateRelativeRotations( // tube. Here we get the angle of that tube as defined in the IDF.
double firstTubeRotationAngle = - firstTubePosition.angle(V3D(0, 0, 1)) * rad2deg; + firstTubePosition.angle(V3D(0, 0, 1)) * RAD_TO_DEG; if (m_instName == "D20") { - firstTubeRotationAngle += D20_NUMBER_DEAD_PIXELS * D20_PIXEL_SIZE; + firstTubeRotationAngle += m_offsetTheta; } else if (m_instName == "D2B") { firstTubeRotationAngle = -firstTubeRotationAngle; std::transform(tubeRotations.begin(), tubeRotations.end(), @@ -383,14 +389,11 @@ void LoadILLDiffraction::fillMovingInstrumentScan(const NXUInt &data, } } - // Dead pixel offset, should be zero except for D20 - size_t deadOffset = (m_numberDetectorsRead - m_numberDetectorsActual) / 2; - // Then load the detector spectra for (size_t i = NUMBER_MONITORS; i < m_numberDetectorsActual + NUMBER_MONITORS; ++i) { for (size_t j = 0; j < m_numberScanPoints; ++j) { - const auto tubeNumber = (i - NUMBER_MONITORS + deadOffset) / m_sizeDim2; + const auto tubeNumber = (i - NUMBER_MONITORS) / m_sizeDim2; const auto pixelInTubeNumber = (i - NUMBER_MONITORS) % m_sizeDim2; unsigned int y = data(static_cast<int>(j), static_cast<int>(tubeNumber), static_cast<int>(pixelInTubeNumber)); @@ -425,12 +428,11 @@ void LoadILLDiffraction::fillStaticInstrumentScan(const NXUInt &data, [](double e) { return sqrt(e); }); // Assign detector counts - size_t deadOffset = (m_numberDetectorsRead - m_numberDetectorsActual) / 2; for (size_t i = NUMBER_MONITORS; i < m_numberDetectorsActual + NUMBER_MONITORS; ++i) { auto &spectrum = m_outWorkspace->mutableY(i); auto &errors = m_outWorkspace->mutableE(i); - const auto tubeNumber = (i - NUMBER_MONITORS + deadOffset) / m_sizeDim2; + const auto tubeNumber = (i - NUMBER_MONITORS) / m_sizeDim2; const auto pixelInTubeNumber = (i - NUMBER_MONITORS) % m_sizeDim2; for (size_t j = 0; j < m_numberScanPoints; ++j) { unsigned int y = data(static_cast<int>(j), static_cast<int>(tubeNumber), @@ -484,8 +486,12 @@ void LoadILLDiffraction::fillDataScanMetaData(const NXDouble &scan) { auto &mutableRun = m_outWorkspace->mutableRun(); for (size_t i = 0; i < m_scanVar.size(); ++i) { if (!boost::starts_with(m_scanVar[i].property, "Monitor")) { - auto property = Kernel::make_unique<TimeSeriesProperty<double>>( - m_scanVar[i].name + "." + m_scanVar[i].property); + const std::string scanVarName = + boost::algorithm::to_lower_copy(m_scanVar[i].name); + const std::string scanVarProp = + boost::algorithm::to_lower_copy(m_scanVar[i].property); + const std::string propName = scanVarName + "." + scanVarProp; + auto property = Kernel::make_unique<TimeSeriesProperty<double>>(propName); for (size_t j = 0; j < m_numberScanPoints; ++j) { property->addValue(absoluteTimes[j], scan(static_cast<int>(i), static_cast<int>(j))); @@ -636,38 +642,22 @@ void LoadILLDiffraction::resolveInstrument() { // Here we have to hardcode the numbers of pixels. // The only way is to read the size of the detectors read from the files // and based on it decide which of the 3 alternative IDFs to load. 
- // Some amount of pixels are dead on each end, these have to be + // Some amount of pixels are dead at the right end, these have to be // subtracted // correspondingly dependent on the resolution mode m_resolutionMode = m_numberDetectorsRead / D20_NUMBER_PIXELS; size_t activePixels = D20_NUMBER_PIXELS - 2 * D20_NUMBER_DEAD_PIXELS; m_numberDetectorsActual = m_resolutionMode * activePixels; - // 1: low resolution, 2: nominal, 3: high resolution - switch (m_resolutionMode) { - case 1: { - // low resolution mode - m_instName += "_lr"; - m_numberDetectorsActual = - D20_NUMBER_PIXELS - 2 * D20_NUMBER_DEAD_PIXELS; - break; - } - case 2: { - // nominal resolution - m_numberDetectorsActual = - 2 * (D20_NUMBER_PIXELS - 2 * D20_NUMBER_DEAD_PIXELS); - break; - } - case 3: { - // high resolution mode - m_instName += "_hr"; - m_numberDetectorsActual = - 3 * (D20_NUMBER_PIXELS - 2 * D20_NUMBER_DEAD_PIXELS); - break; - } - default: + + if (m_resolutionMode > 3 || m_resolutionMode < 1) { throw std::runtime_error("Unknown resolution mode for instrument " + m_instName); } + if (m_resolutionMode == 1) { + m_instName += "_lr"; + } else if (m_resolutionMode == 3) { + m_instName += "_hr"; + } } g_log.debug() << "Instrument name is " << m_instName << " and has " << m_numberDetectorsActual << " actual detectors.\n"; @@ -708,7 +698,7 @@ void LoadILLDiffraction::moveTwoThetaZero(double twoTheta0Read) { IComponent_const_sptr component = instrument->getComponentByName("detector"); double twoTheta0Actual = twoTheta0Read; if (m_instName == "D20") { - twoTheta0Actual += D20_NUMBER_DEAD_PIXELS * D20_PIXEL_SIZE; + twoTheta0Actual += m_offsetTheta; } Quat rotation(twoTheta0Actual, V3D(0, 1, 0)); g_log.debug() << "Setting 2theta0 to " << twoTheta0Actual; @@ -733,6 +723,37 @@ LoadILLDiffraction::getInstrumentFilePath(const std::string &instName) const { return fullPath.toString(); } +/** Adds some sample logs needed later by reduction +*/ +void LoadILLDiffraction::setSampleLogs() { + Run &run = m_outWorkspace->mutableRun(); + std::string scanTypeStr = "NoScan"; + if (m_scanType == DetectorScan) { + scanTypeStr = "DetectorScan"; + } else if (m_scanType == OtherScan) { + scanTypeStr = "OtherScan"; + } + run.addLogData( + new PropertyWithValue<std::string>("ScanType", std::move(scanTypeStr))); + run.addLogData(new PropertyWithValue<double>( + "PixelSize", D20_PIXEL_SIZE / static_cast<double>(m_resolutionMode))); + std::string resModeStr = "Nominal"; + if (m_resolutionMode == 1) { + resModeStr = "Low"; + } else if (m_resolutionMode == 3) { + resModeStr = "High"; + } + run.addLogData(new PropertyWithValue<std::string>("ResolutionMode", + std::move(resModeStr))); + if (m_scanType != NoScan) { + run.addLogData(new PropertyWithValue<int>( + "ScanSteps", static_cast<int>(m_numberScanPoints))); + } + double lambda = run.getLogAsSingleValue("wavelength"); + double eFixed = WAVE_TO_E / (lambda * lambda); + run.addLogData(new PropertyWithValue<double>("Ei", eFixed)); +} + /** * Returns true if the file contains calibrated data * * @@ -748,5 +769,13 @@ bool LoadILLDiffraction::containsCalibratedData( const std::string &filename) const { return descriptor.pathExists("/entry0/data_scan/detector_data/raw_data"); } +/** + * Computes the 2theta offset of the decoder for D20 + */ +void LoadILLDiffraction::computeThetaOffset() { + m_offsetTheta = static_cast<double>(D20_NUMBER_DEAD_PIXELS) * D20_PIXEL_SIZE - + D20_PIXEL_SIZE / (static_cast<double>(m_resolutionMode) * 2); +} + } // namespace DataHandling } // namespace Mantid diff --git
a/Framework/DataHandling/test/LoadILLDiffractionTest.h b/Framework/DataHandling/test/LoadILLDiffractionTest.h index af4cf2e35b41b5217cfe11a7450edf9082d6cacb..dd0abadee187bf17ce0827589978b57166406520 100644 --- a/Framework/DataHandling/test/LoadILLDiffractionTest.h +++ b/Framework/DataHandling/test/LoadILLDiffractionTest.h @@ -3,11 +3,11 @@ #include <cxxtest/TestSuite.h> +#include "MantidDataHandling/LoadILLDiffraction.h" #include "MantidAPI/AnalysisDataService.h" -#include "MantidGeometry/Instrument/DetectorInfo.h" #include "MantidAPI/MatrixWorkspace.h" #include "MantidDataHandling/Load.h" -#include "MantidDataHandling/LoadILLDiffraction.h" +#include "MantidGeometry/Instrument/DetectorInfo.h" #include "MantidKernel/ConfigService.h" using namespace Mantid::API; @@ -59,33 +59,54 @@ public: TS_ASSERT(!outputWS->isHistogramData()) TS_ASSERT(!outputWS->isDistribution()) + // two theta of the first pixel + TS_ASSERT_DELTA(outputWS->detectorInfo().signedTwoTheta(1) * RAD_2_DEG, + -2.79662, 1E-5) + TS_ASSERT_EQUALS(outputWS->x(0)[0], 0.) - TS_ASSERT_EQUALS(outputWS->y(0)[0], 2685529) - TS_ASSERT_DELTA(outputWS->e(0)[0], 1638.76, 0.01) + TS_ASSERT_EQUALS(outputWS->y(0)[0], 2685529.) + TS_ASSERT_DELTA(outputWS->e(0)[0], 1638.75, 0.01) TS_ASSERT_EQUALS(outputWS->x(1)[0], 0.) - TS_ASSERT_EQUALS(outputWS->y(1)[0], 548) - TS_ASSERT_DELTA(outputWS->e(1)[0], 23.40, 0.01) + TS_ASSERT_EQUALS(outputWS->y(1)[0], 0.) + TS_ASSERT_EQUALS(outputWS->e(1)[0], 0.) - TS_ASSERT_EQUALS(outputWS->x(2)[0], 0.) - TS_ASSERT_EQUALS(outputWS->y(2)[0], 991) - TS_ASSERT_DELTA(outputWS->e(2)[0], 31.48, 0.01) + TS_ASSERT_EQUALS(outputWS->x(64)[0], 0.) + TS_ASSERT_EQUALS(outputWS->y(64)[0], 0.) + TS_ASSERT_EQUALS(outputWS->e(64)[0], 0.) + + TS_ASSERT_EQUALS(outputWS->x(65)[0], 0.) + TS_ASSERT_EQUALS(outputWS->y(65)[0], 548.) + TS_ASSERT_DELTA(outputWS->e(65)[0], 23.4, 0.01) TS_ASSERT_EQUALS(outputWS->x(1111)[0], 0.) - TS_ASSERT_EQUALS(outputWS->y(1111)[0], 7080) - TS_ASSERT_DELTA(outputWS->e(1111)[0], 84.14, 0.01) + TS_ASSERT_EQUALS(outputWS->y(1111)[0], 6285) + TS_ASSERT_DELTA(outputWS->e(1111)[0], 79.27, 0.01) TS_ASSERT_EQUALS(outputWS->x(3072)[0], 0.) - TS_ASSERT_EQUALS(outputWS->y(3072)[0], 0.) - TS_ASSERT_EQUALS(outputWS->e(3072)[0], 0.) - - TS_ASSERT(outputWS->run().hasProperty("simulated_d20.TotalCount")) - TS_ASSERT(outputWS->run().hasProperty("AcquisitionSpy.Time")) - TS_ASSERT(outputWS->run().hasProperty("SampleSettings.SampleTemp")) - - const auto sim = outputWS->run().getLogData("simulated_d20.TotalCount"); - const auto spy = outputWS->run().getLogData("AcquisitionSpy.Time"); - const auto sample = outputWS->run().getLogData("SampleSettings.SampleTemp"); + TS_ASSERT_EQUALS(outputWS->y(3072)[0], 7848.) 
+ TS_ASSERT_DELTA(outputWS->e(3072)[0], 88.58, 0.01) + + const auto &run = outputWS->run(); + TS_ASSERT(run.hasProperty("simulated_d20.TotalCount")) + TS_ASSERT(run.hasProperty("AcquisitionSpy.Time")) + TS_ASSERT(run.hasProperty("SampleSettings.SampleTemp")) + TS_ASSERT(run.hasProperty("ScanType")) + TS_ASSERT(run.hasProperty("PixelSize")) + TS_ASSERT(run.hasProperty("ResolutionMode")) + TS_ASSERT(run.hasProperty("Ei")) + + const auto sim = run.getLogData("simulated_d20.TotalCount"); + const auto spy = run.getLogData("AcquisitionSpy.Time"); + const auto sample = run.getLogData("SampleSettings.SampleTemp"); + const auto scanType = run.getLogData("ScanType"); + const double pixelSize = run.getLogAsSingleValue("PixelSize"); + const auto resMode = run.getLogData("ResolutionMode"); + const auto ei = run.getLogAsSingleValue("Ei"); + + TS_ASSERT_EQUALS(scanType->value(), "NoScan") + TS_ASSERT_EQUALS(resMode->value(), "Nominal") + TS_ASSERT_DELTA(pixelSize, 0.05, 1E-10) TS_ASSERT_EQUALS(sim->size(), 1) TS_ASSERT_EQUALS(spy->size(), 1) @@ -95,6 +116,7 @@ public: TS_ASSERT_EQUALS(spy->value(), "2017-May-15 14:36:18 240\n") TS_ASSERT_EQUALS(sample->value(), "2017-May-15 14:36:18 4.9681\n") + TS_ASSERT_DELTA(ei, 14.09, 0.01) TS_ASSERT_EQUALS( outputWS->run().getProperty("Detector.calibration_file")->value(), "none") @@ -154,15 +176,18 @@ public: } } - TS_ASSERT(outputWS->run().hasProperty("Omega.Position")) - TS_ASSERT(outputWS->run().hasProperty("Detector.TotalCount")) - TS_ASSERT(outputWS->run().hasProperty("AcquisitionSpy.Time")) - TS_ASSERT(outputWS->run().hasProperty("SampleSettings.SampleTemp")) - TS_ASSERT(outputWS->run().hasProperty("MagneticField.field")) - - const auto omega = outputWS->run().getLogData("Omega.Position"); - + const auto &run = outputWS->run(); + TS_ASSERT(outputWS->run().hasProperty("omega.position")) + TS_ASSERT(outputWS->run().hasProperty("detector.totalcount")) + TS_ASSERT(outputWS->run().hasProperty("acquisitionspy.time")) + TS_ASSERT(outputWS->run().hasProperty("samplesettings.sampletemp")) + TS_ASSERT(outputWS->run().hasProperty("magneticfield.field")) + const auto omega = run.getLogData("omega.position"); TS_ASSERT_EQUALS(omega->size(), 21) + const double steps = run.getLogAsSingleValue("ScanSteps"); + const auto scanType = run.getLogData("ScanType"); + TS_ASSERT_EQUALS(scanType->value(), "OtherScan") + TS_ASSERT_DELTA(steps, 21., 1E-10) const std::string omegaTimeSeriesValue = "2017-Feb-15 08:58:52 1\n2017-Feb-15 08:58:52.521547000 " diff --git a/Framework/Kernel/src/ArrayOrderedPairsValidator.cpp b/Framework/Kernel/src/ArrayOrderedPairsValidator.cpp index a970917f819d560c3d176f39a2c4cea54bce2977..457389a5e0d75f0f156ff4a037f074ac9dd99c12 100644 --- a/Framework/Kernel/src/ArrayOrderedPairsValidator.cpp +++ b/Framework/Kernel/src/ArrayOrderedPairsValidator.cpp @@ -8,7 +8,7 @@ namespace Mantid { namespace Kernel { /** - * Create a clone of the current ArrayBoundedValidator. + * Create a clone of the current ArrayOrderedPairsValidator. * @return The cloned object. 
*/ template <typename TYPE> diff --git a/Framework/PythonInterface/mantid/kernel/CMakeLists.txt b/Framework/PythonInterface/mantid/kernel/CMakeLists.txt index 9d7ca816920248970d2f1400c9d24cea44a366ed..1bd8da284bb24ff3abe99049c3d4f5070f992547 100644 --- a/Framework/PythonInterface/mantid/kernel/CMakeLists.txt +++ b/Framework/PythonInterface/mantid/kernel/CMakeLists.txt @@ -36,6 +36,7 @@ set ( EXPORT_FILES src/Exports/ListValidator.cpp src/Exports/ArrayLengthValidator.cpp src/Exports/ArrayBoundedValidator.cpp + src/Exports/ArrayOrderedPairsValidator.cpp src/Exports/MandatoryValidator.cpp src/Exports/CompositeValidator.cpp src/Exports/LogFilter.cpp diff --git a/Framework/PythonInterface/mantid/kernel/src/Exports/ArrayOrderedPairsValidator.cpp b/Framework/PythonInterface/mantid/kernel/src/Exports/ArrayOrderedPairsValidator.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1986a524d33029fa24152e4b22ba64abbd32d047 --- /dev/null +++ b/Framework/PythonInterface/mantid/kernel/src/Exports/ArrayOrderedPairsValidator.cpp @@ -0,0 +1,26 @@ +#include "MantidKernel/ArrayOrderedPairsValidator.h" +#include <boost/python/class.hpp> +#include <boost/python/make_constructor.hpp> +#include <boost/python/return_value_policy.hpp> + +using Mantid::Kernel::ArrayOrderedPairsValidator; +using Mantid::Kernel::IValidator; +using namespace boost::python; + +namespace { +template <typename TYPE> +ArrayOrderedPairsValidator<TYPE> *createArrayOrderedPairsValidator() { + return new ArrayOrderedPairsValidator<TYPE>(); +} + +#define EXPORT_PAIRSVALIDATOR(type, prefix) \ + class_<ArrayOrderedPairsValidator<type>, bases<IValidator>, \ + boost::noncopyable>(#prefix "ArrayOrderedPairsValidator") \ + .def("__init__", \ + make_constructor(&createArrayOrderedPairsValidator<type>, \ + default_call_policies())); +} +void export_ArrayOrderedPairsValidator() { + EXPORT_PAIRSVALIDATOR(double, Float); + EXPORT_PAIRSVALIDATOR(long, Int); +} diff --git a/Framework/PythonInterface/plugins/algorithms/LoadAndMerge.py b/Framework/PythonInterface/plugins/algorithms/LoadAndMerge.py index 0b92d71dd2997ffaa7e1262cb591608789d316aa..8e954c53f6964d3ba5314a08747be04859bdd391 100644 --- a/Framework/PythonInterface/plugins/algorithms/LoadAndMerge.py +++ b/Framework/PythonInterface/plugins/algorithms/LoadAndMerge.py @@ -3,7 +3,7 @@ from __future__ import (absolute_import, division, print_function) import os.path from mantid.kernel import Direction, StringContainsValidator, PropertyManagerProperty from mantid.api import AlgorithmFactory, AlgorithmManager, MultipleFileProperty, \ - WorkspaceProperty, PythonAlgorithm, FileLoaderRegistry + WorkspaceProperty, PythonAlgorithm, FileLoaderRegistry, Progress from mantid.simpleapi import MergeRuns, RenameWorkspace, DeleteWorkspace, GroupWorkspaces, mtd @@ -13,6 +13,7 @@ class LoadAndMerge(PythonAlgorithm): _version = None _loader_options = None _prefix = '' + _progress = None def name(self): return "LoadMergeRuns" @@ -55,6 +56,7 @@ class LoadAndMerge(PythonAlgorithm): @param run : the full file path @param runnumber : the run number """ + self._progress.report('Loading '+runnumber) alg = self._create_fresh_loader() alg.setPropertyValue('Filename', run) alg.setPropertyValue('OutputWorkspace', runnumber) @@ -70,11 +72,8 @@ class LoadAndMerge(PythonAlgorithm): # So running on the same instance can potentially cause problems. 
# Also the output will always be on ADS, since this algorithm relies on # MergeRuns, which does not work outside ADS (because of WorkspaceGroup input) - # Moreover, this should NOT be run as child, since in that case if we run a loader - # having an additional optional output workspace, but without requesting the optional output, - # it will still be produced with some hidden temporary name (__TMPx...). - # This is related to replaying the history, or might as well be a bug in Algorithm base class. - alg = AlgorithmManager.create(self._loader, self._version) + alg = self.createChildAlgorithm(self._loader, self._version) + alg.setAlwaysStoreInADS(True) alg.initialize() for key in self._loader_options.keys(): alg.setPropertyValue(key, self._loader_options.getPropertyValue(key)) @@ -82,6 +81,9 @@ class LoadAndMerge(PythonAlgorithm): def PyExec(self): runs = self.getProperty('Filename').value + runs_as_str = self.getPropertyValue('Filename') + number_runs = runs_as_str.count(',') + runs_as_str.count('+') + 1 + self._progress = Progress(self, start=0.0, end=1.0, nreports=number_runs) self._loader = self.getPropertyValue('LoaderName') self._version = self.getProperty('LoaderVersion').value self._loader_options = self.getProperty('LoaderOptions').value diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PowderDiffILLDetEffCorr.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PowderDiffILLDetEffCorr.py new file mode 100644 index 0000000000000000000000000000000000000000..7d0d1395704d0dee1709aaa67606c2cda48c7bfc --- /dev/null +++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PowderDiffILLDetEffCorr.py @@ -0,0 +1,461 @@ +from __future__ import (absolute_import, division, print_function) + +import math +import numpy as np +from mantid.kernel import StringListValidator, Direction, IntArrayBoundedValidator, IntArrayProperty, \ + CompositeValidator, IntArrayLengthValidator, IntArrayOrderedPairsValidator, FloatArrayOrderedPairsValidator, \ + FloatArrayProperty, VisibleWhenProperty, PropertyCriterion +from mantid.api import PythonAlgorithm, FileProperty, FileAction, Progress, MatrixWorkspaceProperty, PropertyMode +from mantid.simpleapi import * + + +class PowderDiffILLDetEffCorr(PythonAlgorithm): + + _out_name = None # the name of the output workspace + _input_file = None # input file (numor), must be a detector scan + _calib_file = None # file containing previously derived calibration constants + _progress = None # progress tracking + _method = None # calibration method + _scan_points = None # number of scan points (time indices) + _out_response = None # the name of the second output workspace with merged response + _bin_offset = None # this holds int(scan step / pixel size) + _n_det = None # number of detector pixels (typically 3072) + _normalise_to = None # normalisation option + _pixel_range = None # range of the pixels to derive calibration for, e.g. 65,3072 + _regions_of_interest = None # ROI to normalise to, e.g. 10,50,70,100, typically just one range + _interpolate = None # whether to interpolate 2thetas before taking relative ratios + _excluded_ranges = None # 2theta ranges to exclude when deriving the relative calibration factor + # e.g. 
-20,0,40,50 + _live_pixels = None # holds the list of cells that are not zero counting + + def _hide(self, name): + return '__' + self._out_name + '_' + name + + def category(self): + return "ILL\\Diffraction;Diffraction\\Reduction;Diffraction\\Calibration" + + def summary(self): + return "Performs detector efficiency correction calculation for powder diffraction instrument D20 at ILL." + + def name(self): + return "PowderDiffILLDetEffCorr" + + def PyInit(self): + self.declareProperty(FileProperty('CalibrationRun', '', action=FileAction.Load, extensions=['nxs']), + doc='File path of calibration run. Must be a detector scan.') + + self.declareProperty(FileProperty('CalibrationFile', '', action=FileAction.OptionalLoad, extensions=['nxs']), + doc='Optional file containing previous calibration constants.') + + self.declareProperty(name='CalibrationMethod', + defaultValue='Median', + validator=StringListValidator(['Median', 'Mean', 'MostLikelyMean']), + doc='The method of how the calibration constant of a ' + 'pixel relative to the neighbouring one is derived.') + + self.declareProperty(name='InterpolateOverlappingAngles', defaultValue=False, + doc='Whether to interpolate 2theta values for overlapping regions between neighbouring cells.') + + self.declareProperty(name='NormaliseTo', + defaultValue='None', + validator=StringListValidator(['None', 'Monitor', 'ROI']), + doc='Normalise to time, monitor or ROI counts before deriving the calibration.') + + thetaRangeValidator = FloatArrayOrderedPairsValidator() + + self.declareProperty(FloatArrayProperty(name='ROI', values=[0,100.], validator=thetaRangeValidator), + doc='Regions of interest for normalisation [in scattering angle in degrees].') + + normaliseToROI = VisibleWhenProperty('NormaliseTo', PropertyCriterion.IsEqualTo, 'ROI') + self.setPropertySettings('ROI', normaliseToROI) + + self.declareProperty(FloatArrayProperty(name='ExcludedRange', values=[], validator=thetaRangeValidator), + doc='2theta regions to exclude from the computation of relative calibration constants ' + '[in scattering angle in degrees]. ') + + pixelRangeValidator = CompositeValidator() + greaterThanOne = IntArrayBoundedValidator() + greaterThanOne.setLower(1) + lengthTwo = IntArrayLengthValidator() + lengthTwo.setLength(2) + orderedPairsValidator = IntArrayOrderedPairsValidator() + pixelRangeValidator.add(greaterThanOne) + pixelRangeValidator.add(lengthTwo) + pixelRangeValidator.add(orderedPairsValidator) + + self.declareProperty(IntArrayProperty(name='PixelRange', values=[1,3072], validator=pixelRangeValidator), + doc='Range of the pixel numbers to compute the calibration factors for. ' + 'For the other pixels outside the range, the factor will be set to 1.') + + self.declareProperty(MatrixWorkspaceProperty('OutputResponseWorkspace', '', + optional=PropertyMode.Optional, direction=Direction.Output), + doc='Output workspace containing the summed diffraction patterns of all the pixels.') + + self.declareProperty(MatrixWorkspaceProperty('OutputWorkspace', '', + direction=Direction.Output), + doc='Output workspace containing the detector efficiencies for each pixel.') + + def validateInputs(self): + issues = dict() + return issues + + def _update_reference(self, ws, cropped_ws, ref_ws, factor): + """ + Merges the response of the current pixel with the current combined reference in the + overlapping region, taking into account the relative scale factor.
+ Updates the reference workspace to contain the weighted sum of the two, or the clone + of one or the other if the scale factor is pathological. + @param ws: input workspace containing data from the current pixel + @param cropped_ws: same as ws, but last bins cropped to match the size of the reference + @param ref_ws: current reference workspace + @param factor: relative efficiency factor for the current pixel + """ + if factor == 0.: + CloneWorkspace(InputWorkspace=cropped_ws, OutputWorkspace=ref_ws) + elif str(factor) != 'inf' and str(factor) != 'nan': + Scale(InputWorkspace=cropped_ws, OutputWorkspace=cropped_ws, Factor=factor) + WeightedMean(InputWorkspace1=ref_ws, InputWorkspace2=cropped_ws, OutputWorkspace=ref_ws) + + x = mtd[ws].readX(0)[-self._bin_offset] + last_bins = ws + '_last_bins' + CropWorkspace(InputWorkspace=ws, XMin=x, OutputWorkspace=last_bins) + ConjoinXRuns(InputWorkspaces=[ref_ws,last_bins], OutputWorkspace=ref_ws) + x = mtd[ref_ws].readX(0)[self._bin_offset] + CropWorkspace(InputWorkspace=ref_ws, XMin=x, OutputWorkspace=ref_ws) + DeleteWorkspace(last_bins) + + def _exclude_ranges(self, ratio_ws): + """ + Excludes 2theta ranges from the ratio workspace + @param ratio_ws : the name of the ratio workspace + """ + ConvertToHistogram(InputWorkspace=ratio_ws, OutputWorkspace=ratio_ws) + x = mtd[ratio_ws].readX(0) + xmin = x[0] + xmax = x[-1] + for excluded_range in self._excluded_ranges: + if excluded_range[1] > xmin and excluded_range[0] < xmax: + if excluded_range[0] > xmin: + xmin = excluded_range[0] + if excluded_range[1] < xmax: + xmax = excluded_range[1] + MaskBins(InputWorkspace=ratio_ws, OutputWorkspace=ratio_ws, XMin=xmin, XMax=xmax) + ConvertToPointData(InputWorkspace=ratio_ws, OutputWorkspace=ratio_ws) + + def _compute_relative_factor(self, ratio_ws): + """ + Calculates the relative detector efficiency from the workspace containing response ratios. + Implements mean, median and most likely mean methods. + @param ratio_ws: input workspace containing response ratios + @returns: relative calibration factor + """ + ratios = mtd[ratio_ws].extractY() + ratios = ratios[np.nonzero(ratios)] + factor = 1. + if ratios.any(): + if self._method == 'Median': + factor = np.median(ratios) + elif self._method == 'Mean': + factor = np.mean(ratios) + elif self._method == 'MostLikelyMean': + factor = MostLikelyMean(ratios) + return factor + + def _validate_scan(self, scan_ws): + """ + Ensures that the input workspace corresponds to a detector scan + @param scan_ws: input detector scan workspace + @throws: RuntimeError if the workspace is not a detector scan + """ + is_scanned = False + try: + mtd[scan_ws].detectorInfo().isMasked(0) + except RuntimeError: + is_scanned = True + if not is_scanned: + raise RuntimeError('The input run does not correspond to a detector scan.') + + def _reshape(self, raw_ws, ws_2d): + """ + Reshapes the single column detector scan workspace to a 2D workspace + with n_det+1 rows (including the monitor) and n_scan_points columns + Sets the signed 2theta as x-values. The output is a ragged workspace. + Sample logs are copied over, but the instrument is lost on purpose. 
+ @param raw_ws : raw detector scan workspace + @param ws_2d : the name of the returned workspace + """ + y = mtd[raw_ws].extractY() + e = mtd[raw_ws].extractE() + x = mtd[raw_ws].getAxis(1).extractValues() + shape = [self._n_det+1, self._scan_points] + y_2d = np.reshape(y, shape) + e_2d = np.reshape(e, shape) + x_2d = np.reshape(x, shape) + CreateWorkspace(DataX=x_2d, DataY=y_2d, DataE=e_2d, NSpec=self._n_det+1, OutputWorkspace=ws_2d) + CopyLogs(InputWorkspace=raw_ws, OutputWorkspace=ws_2d) + + def _set_input_properties(self): + """ + Sets up the input properties of the algorithm + """ + self._input_file = self.getPropertyValue('CalibrationRun') + self._calib_file = self.getPropertyValue('CalibrationFile') + self._method = self.getPropertyValue('CalibrationMethod') + self._normalise_to = self.getPropertyValue('NormaliseTo') + self._regions_of_interest = self.getProperty('ROI').value + self._excluded_ranges = self.getProperty('ExcludedRange').value + self._interpolate = self.getProperty('InterpolateOverlappingAngles').value + self._pixel_range = self.getProperty('PixelRange').value + self._out_response = self.getPropertyValue('OutputResponseWorkspace') + self._out_name = self.getPropertyValue('OutputWorkspace') + + def _configure(self, raw_ws): + """ + Configures the fundamental parameters for the algorithm. + @param : the name of the raw detector scan workspace + """ + self._scan_points = mtd[raw_ws].getRun().getLogData('ScanSteps').value + self.log().information('Number of scan steps is: ' + str(self._scan_points)) + self._n_det = mtd[raw_ws].detectorInfo().size() - 1 + self.log().information('Number of detector pixels is: ' + str(self._n_det)) + pixel_size = mtd[raw_ws].getRun().getLogData('PixelSize').value + theta_zeros = mtd[raw_ws].getRun().getLogData('2theta.Position') + scan_step_in_pixel_numbers = (theta_zeros.nthValue(1) - theta_zeros.nthValue(0)) / pixel_size + self._bin_offset = int(math.ceil(scan_step_in_pixel_numbers)) + self.log().information('Bin offset is: ' + str(self._bin_offset)) + if (abs(self._bin_offset - scan_step_in_pixel_numbers) > 0.1 and not self._interpolate): + self.log().warning('Scan step is not an integer multiple of the pixel size. ' + 'Consider checking the option InterpolateOverlappingAngles.') + if self._pixel_range[1] > self._n_det: + self.log().warning('Last pixel number provided is larger than total number of pixels. ' + 'Taking the last existing pixel.') + self._pixel_range[1] = self._n_det + if self._excluded_ranges.any(): + n_excluded_ranges = int(len(self._excluded_ranges) / 2) + self._excluded_ranges = np.split(self._excluded_ranges, n_excluded_ranges) + + def _validate_roi(self, ws_2d): + """ + ROI has to be fully within the aperture of the detector at any time index. + Example: + time index : detector span (degrees) + first : -30 -> 120 + last : 10 -> 160 + ROI can not be wider than [10,120] + @param ws_2d : 2D input workspace + @throws : ValueError if ROI is not fully within the detector span at any time index + """ + roi_min = np.min(self._regions_of_interest) + roi_max = np.max(self._regions_of_interest) + first_cell_last_time_theta = mtd[ws_2d].readX(1)[-1] + last_cell_first_time_theta = mtd[ws_2d].readX(self._n_det)[0] + if roi_min < first_cell_last_time_theta or roi_max > last_cell_first_time_theta: + raise ValueError('Invalid ROI. The region must be fully contained within the detector at any time index. ' + 'For the given scan configuration, ROI can be within {0} and {1} degrees.' 
+ .format(first_cell_last_time_theta,last_cell_first_time_theta)) + + def _normalise_roi(self, ws_2d): + """ + Normalises to the regions of interest (ROI) + @param ws_2d : 2D input workspace + """ + y = mtd[ws_2d].extractY() + x = mtd[ws_2d].extractX() + roi_counts_arr = np.ones(self._scan_points) + # typically should be number_rois = 1 + number_rois = int(len(self._regions_of_interest)/2) + starts = self._regions_of_interest[0::2] + ends = self._regions_of_interest[1::2] + first_cells = [] + last_cells = [] + for roi in range(number_rois): + first_cell = np.argmax(x[...,0]>starts[roi]) + first_cells.append(first_cell) + last_cell = np.argmin(x[...,0]<ends[roi]) + last_cells.append(last_cell) + for time_index in range(self._scan_points): + roi_counts = 0 + counts = y[...,time_index] + for roi in range(number_rois): + first_cell = first_cells[roi] - self._bin_offset * time_index + last_cell = last_cells[roi] - self._bin_offset * time_index + roi_counts += np.sum(counts[first_cell:last_cell]) + roi_counts_arr[time_index] = roi_counts + roi_ws = self._hide('roi') + ExtractSingleSpectrum(InputWorkspace=ws_2d, WorkspaceIndex=0, OutputWorkspace=roi_ws) + mtd[roi_ws].setY(0, roi_counts_arr) + mtd[roi_ws].setE(0, np.sqrt(roi_counts_arr)) + Divide(LHSWorkspace=ws_2d, RHSWorkspace=roi_ws, OutputWorkspace=ws_2d) + DeleteWorkspace(roi_ws) + + def _prepare_response_workspace(self, ws_2d, response_ws): + """ + Prepares the response workspace with x-axis set as 2theta values + @param ws_2d : 2D input workspace + @param response_ws : the name of the output response workspace + """ + size = (self._n_det - 1) * self._bin_offset + self._scan_points + y = np.zeros(size) + e = np.zeros(size) + x = np.zeros(size) + x_2d = mtd[ws_2d].extractX() + x[0:self._scan_points] = x_2d[0,...] + index = self._scan_points + for pixel in range(0,self._n_det): + x[index:(index+self._bin_offset)] = x_2d[pixel,-self._bin_offset:] + index += self._bin_offset + CreateWorkspace(DataX=x, DataY=y, DataE=e, NSpec=1, UnitX='Degrees', OutputWorkspace=response_ws) + + def _perform_absolute_normalisation(self, constants_ws): + """ + Performs the absolute normalisation + Finds the median of already derived calibration constants, excluding the dead pixels + Divides all the constants by the median, for zero pixels sets the constants to 1. + @param constants_ws : the name of the constants workspace + """ + constants = mtd[constants_ws].extractY() + absolute_norm = np.median(constants[self._live_pixels]) + self.log().information('Absolute normalisation constant is: ' + str(absolute_norm)) + Scale(InputWorkspace=constants_ws, Factor=1./absolute_norm, OutputWorkspace=constants_ws) + for pixel in range(mtd[constants_ws].getNumberHistograms()): + if not self._live_pixels[pixel]: + mtd[constants_ws].dataY(pixel)[0] = 1. + mtd[constants_ws].dataE(pixel)[0] = 0. + + def _derive_calibration(self, ws_2d, constants_ws, response_ws): + """ + Computes the relative calibration factors sequentially for all the pixels.
+ @param : 2D input workspace + @param : the output workspace name containing the calibration constants + @param : the output workspace name containing the combined response + """ + ref_ws = self._hide('ref') + zeros = np.zeros(self._n_det) + constants = np.ones(self._n_det) + self._live_pixels = np.zeros(self._n_det, dtype=bool) + CreateWorkspace(DataX=zeros, DataY=constants, DataE=zeros, NSpec=self._n_det, OutputWorkspace=constants_ws) + + # loop over all the requested pixels to derive the calibration sequentially + # this is a serial loop of about 3K iterations, so performance is critical + nreports = int(self._pixel_range[1] - self._pixel_range[0] + 1) + self._progress = Progress(self, start=0.0, end=1.0, nreports=nreports) + for det in range(self._pixel_range[0] - 1, self._pixel_range[1]): + self._progress.report('Computing the relative calibration factor for pixel #' + str(det)) + ws = '__det_' + str(det) + ExtractSingleSpectrum(InputWorkspace=ws_2d, WorkspaceIndex=det, OutputWorkspace=ws) + y = mtd[ws].readY(0) + x = mtd[ws].readX(0) + # keep track of dead pixels + if np.count_nonzero(y) > self._scan_points/5: + self._live_pixels[det] = True + + if det == self._pixel_range[0] - 1: + CropWorkspace(InputWorkspace=ws, OutputWorkspace=ref_ws, XMin=x[self._bin_offset]) + else: + ratio_ws = ws + '_ratio' + cropped_ws = ws + '_cropped' + CropWorkspace(InputWorkspace=ws, OutputWorkspace=cropped_ws, XMax=x[-(self._bin_offset+1)]) + + if self._interpolate and self._live_pixels[det]: + # SplineInterpolation invalidates the errors, so we need to copy them over + interp_ws = ws + '_interp' + SplineInterpolation(WorkspaceToInterpolate=cropped_ws, WorkspaceToMatch=ref_ws, + OutputWorkspace=interp_ws, OutputWorkspaceDeriv="", EnableLogging=False) + mtd[interp_ws].setE(0, mtd[cropped_ws].readE(0)) + RenameWorkspace(InputWorkspace=interp_ws, OutputWorkspace=cropped_ws) + else: + # here we need to effectively clone the x-axis + cloned_ref_ws = ws + '_cloned' + CloneWorkspace(InputWorkspace=ref_ws, OutputWorkspace=cloned_ref_ws) + mtd[cloned_ref_ws].setY(0, mtd[cropped_ws].readY(0)) + mtd[cloned_ref_ws].setE(0, mtd[cropped_ws].readE(0)) + RenameWorkspace(InputWorkspace=cloned_ref_ws, OutputWorkspace=cropped_ws) + + Divide(LHSWorkspace=ref_ws, RHSWorkspace=cropped_ws, OutputWorkspace=ratio_ws, EnableLogging=False) + if len(self._excluded_ranges) != 0: + self._exclude_ranges(ratio_ws) + factor = self._compute_relative_factor(ratio_ws) + DeleteWorkspace(ratio_ws) + + if str(factor) == 'nan' or str(factor) == 'inf' or factor == 0.: + self.log().warning('Factor is ' + str(factor) + ' for pixel #' + str(det)) + else: + self.log().debug('Factor derived for detector pixel #' + str(det) + ' is ' + str(factor)) + mtd[constants_ws].dataY(det)[0] = factor + + self._update_reference(ws, cropped_ws, ref_ws, factor) + DeleteWorkspace(cropped_ws) + + if self._out_response: + # take care of combined response + end = self._bin_offset + if det == self._pixel_range[1] - 1: + end = self._scan_points - self._bin_offset + for scan_point in range(0, self._bin_offset): + index = mtd[response_ws].blocksize() - self._bin_offset + scan_point + mtd[response_ws].dataY(0)[index] = mtd[ws].readY(0)[end + scan_point] + mtd[response_ws].dataE(0)[index] = mtd[ws].readE(0)[end + scan_point] + + for scan_point in range(0, end): + index = det * self._bin_offset + scan_point + mtd[response_ws].dataY(0)[index] = mtd[ref_ws].readY(0)[scan_point] + mtd[response_ws].dataE(0)[index] = mtd[ref_ws].readE(0)[scan_point] + + DeleteWorkspace(ws) + # 
end of loop over pixels + DeleteWorkspace(ref_ws) + + def PyExec(self): + self._set_input_properties() + + raw_ws = self._hide('raw') + mon_ws = self._hide('mon') + ws_2d = self._hide('2d') + response_ws = self._hide('resp') + constants_ws = self._hide('constants') + + LoadILLDiffraction(Filename=self._input_file, OutputWorkspace=raw_ws) + self._validate_scan(raw_ws) + self._configure(raw_ws) + + ConvertSpectrumAxis(InputWorkspace=raw_ws, OutputWorkspace=raw_ws, Target='SignedTheta', OrderAxis=False) + self._reshape(raw_ws, ws_2d) + DeleteWorkspace(raw_ws) + # extract the monitor spectrum + ExtractSingleSpectrum(InputWorkspace=ws_2d, WorkspaceIndex=0, OutputWorkspace=mon_ws) + + if self._normalise_to == 'Monitor': + Divide(LHSWorkspace=ws_2d, RHSWorkspace=mon_ws, OutputWorkspace=ws_2d) + elif self._normalise_to == 'ROI': + self._validate_roi(ws_2d) + self._normalise_roi(ws_2d) + + DeleteWorkspace(mon_ws) + # only now crop out the monitor spectrum + CropWorkspace(InputWorkspace=ws_2d, StartWorkspaceIndex=1, OutputWorkspace=ws_2d) + ReplaceSpecialValues(InputWorkspace=ws_2d, OutputWorkspace=ws_2d, + NaNValue=0, NaNError=0, InfinityValue=0, InfinityError=0) + + if self._calib_file: + calib_ws = self._hide('calib') + LoadNexusProcessed(Filename=self._calib_file, OutputWorkspace=calib_ws) + Multiply(LHSWorkspace=ws_2d, RHSWorkspace=calib_ws, OutputWorkspace=ws_2d) + DeleteWorkspace(calib_ws) + + if self._out_response: + self._prepare_response_workspace(ws_2d, response_ws) + + # this is the main calculation + self._derive_calibration(ws_2d, constants_ws, response_ws) + DeleteWorkspace(ws_2d) + self._perform_absolute_normalisation(constants_ws) + + # set output workspace[s] + RenameWorkspace(InputWorkspace=constants_ws, OutputWorkspace=self._out_name) + self.setProperty('OutputWorkspace', self._out_name) + if self._out_response: + RenameWorkspace(InputWorkspace=response_ws, OutputWorkspace=self._out_response) + self.setProperty('OutputResponseWorkspace', self._out_response) + +#Register the algorithm with Mantid +AlgorithmFactory.subscribe(PowderDiffILLDetEffCorr) diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PowderDiffILLReduction.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PowderDiffILLReduction.py new file mode 100644 index 0000000000000000000000000000000000000000..699ec5e52f0c6cd2c164cb352d00083c32a5cd1b --- /dev/null +++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PowderDiffILLReduction.py @@ -0,0 +1,336 @@ +from __future__ import (absolute_import, division, print_function) + +import numpy as np +from mantid.kernel import StringListValidator, Direction, FloatArrayProperty, \ + FloatArrayOrderedPairsValidator, VisibleWhenProperty, PropertyCriterion, FloatBoundedValidator +from mantid.api import PythonAlgorithm, MultipleFileProperty, FileProperty, \ + FileAction, Progress, MatrixWorkspaceProperty, NumericAxis +from mantid.simpleapi import * + + +class PowderDiffILLReduction(PythonAlgorithm): + + _calibration_file = None + _roc_file = None + _normalise_option = None + _observable = None + _sort_x_axis = None + _unit = None + _out_name = None + _progress = None + _crop_negative = None + _zero_counting_option = None + _rebin_width = None + _region_of_interest = [] + _zero_cells = [] + + def _hide(self, name): + return '__' + self._out_name + '_' + name + + def _hide_run(selfs, runnumber): + return '__' + runnumber + + def category(self): + return "ILL\\Diffraction;Diffraction\\Reduction" + + def summary(self): + return 
'Performs powder diffraction data reduction for ILL instrument D20.' + + def name(self): + return "PowderDiffILLReduction" + + def validateInputs(self): + issues = dict() + rebin = self.getProperty('ScanAxisBinWidth').value + sort = self.getProperty('SortObservableAxis').value + if rebin != 0 and not sort: + issues['SortObservableAxis'] = 'Axis must be sorted if rebin is requested.' + return issues + + def PyInit(self): + + self.declareProperty(MultipleFileProperty('Run', extensions=['nxs']), + doc='File path of run(s).') + + self.declareProperty(FileProperty('CalibrationFile', '', + action=FileAction.OptionalLoad, extensions=['nxs']), + doc='File containing the detector efficiencies.') + + self.declareProperty(FileProperty('ROCCorrectionFile', '', + action=FileAction.OptionalLoad, extensions=['nxs']), + doc='File containing the radial oscillating collimator (ROC) corrections.') + + self.declareProperty(name='NormaliseTo', + defaultValue='None', + validator=StringListValidator(['None', 'Time', 'Monitor', 'ROI']), + doc='Normalise to time, monitor or ROI counts.') + + thetaRangeValidator = FloatArrayOrderedPairsValidator() + + self.declareProperty(FloatArrayProperty(name='ROI', values=[0, 153.6], validator=thetaRangeValidator), + doc='Regions of interest for normalisation [in scattering angle in degrees].') + + normaliseToROI = VisibleWhenProperty('NormaliseTo', PropertyCriterion.IsEqualTo, 'ROI') + self.setPropertySettings('ROI', normaliseToROI) + + self.declareProperty(name='Observable', + defaultValue='sample.temperature', + doc='Scanning observable, a Sample Log entry.') + + self.declareProperty(name='SortObservableAxis', + defaultValue=False, + doc='Whether or not to sort the scanning observable axis.') + + self.declareProperty(name='ScanAxisBinWidth', defaultValue=0., validator=FloatBoundedValidator(lower=0.), + doc='Rebin the observable axis to this width. 
Default is to not rebin.') + + self.declareProperty(name='CropNegative2Theta', defaultValue=True, + doc='Whether or not to crop out the bins corresponding to negative scattering angle.') + + self.declareProperty(name='ZeroCountingCells', defaultValue='Interpolate', + validator=StringListValidator(['Crop','Interpolate','Leave']), + doc='Crop out the zero counting cells or interpolate the counts from the neighbours.') + + self.declareProperty(name='Unit', + defaultValue='ScatteringAngle', + validator=StringListValidator(['ScatteringAngle', 'MomentumTransfer', 'dSpacing']), + doc='The unit of the reduced diffractogram.') + + self.declareProperty(MatrixWorkspaceProperty('OutputWorkspace', '', + direction=Direction.Output), + doc='Output workspace containing the reduced data.') + + def PyExec(self): + + self._progress = Progress(self, start=0.0, end=1.0, nreports=4) + + self._configure() + temp_ws = self._hide('temp') + joined_ws = self._hide('joined') + mon_ws = self._hide('mon') + + self._progress.report('Loading the data') + LoadAndMerge(Filename=self.getPropertyValue('Run'), + LoaderName='LoadILLDiffraction', + OutputWorkspace=temp_ws) + + self._progress.report('Normalising and merging') + if self._normalise_option == 'Time': + for ws in mtd[temp_ws]: + # normalise to time here, before joining, since the duration is in sample logs + duration = ws.getRun().getLogData('duration').value + Scale(InputWorkspace=ws,OutputWorkspace=ws,Factor=1./duration) + + try: + ConjoinXRuns(InputWorkspaces=temp_ws, SampleLogAsXAxis=self._observable, OutputWorkspace=joined_ws) + except RuntimeError: + raise ValueError('Invalid scanning observable') + + DeleteWorkspace(temp_ws) + + ExtractMonitors(InputWorkspace=joined_ws, DetectorWorkspace=joined_ws, MonitorWorkspace=mon_ws) + + if self._normalise_option == 'Monitor': + Divide(LHSWorkspace=joined_ws, RHSWorkspace=mon_ws, OutputWorkspace=joined_ws) + elif self._normalise_option == 'ROI': + self._normalise_to_roi(joined_ws) + + DeleteWorkspace(mon_ws) + + self._progress.report('Applying calibration or ROC if needed') + if self._calibration_file: + calib_ws = self._hide('calib') + LoadNexusProcessed(Filename=self._calibration_file, OutputWorkspace=calib_ws) + Multiply(LHSWorkspace=joined_ws, RHSWorkspace=calib_ws, OutputWorkspace=joined_ws) + DeleteWorkspace(calib_ws) + + if self._roc_file: + roc_ws = self._hide('roc') + LoadNexusProcessed(Filename=self._roc_file, OutputWorkspace=roc_ws) + Multiply(LHSWorkspace=joined_ws, RHSWorkspace=roc_ws, OutputWorkspace=joined_ws) + DeleteWorkspace(roc_ws) + + if self._sort_x_axis: + SortXAxis(InputWorkspace=joined_ws, OutputWorkspace=joined_ws) + + theta_ws = self._hide('theta') + ConvertSpectrumAxis(InputWorkspace=joined_ws, OutputWorkspace=theta_ws, Target='SignedTheta', OrderAxis=False) + theta_axis = mtd[theta_ws].getAxis(1).extractValues() + DeleteWorkspace(theta_ws) + first_positive_theta = int(np.where(theta_axis > 0)[0][0]) + + if self._crop_negative: + self.log().information('First positive 2theta at workspace index: ' + str(first_positive_theta)) + CropWorkspace(InputWorkspace=joined_ws, OutputWorkspace=joined_ws, StartWorkspaceIndex=first_positive_theta) + + self._progress.report('Treating the zero counting cells') + self._find_zero_cells(joined_ws) + + if self._zero_counting_option == 'Crop': + self._crop_zero_cells(joined_ws, self._zero_cells) + elif self._zero_counting_option == 'Interpolate': + self._interpolate_zero_cells(joined_ws, theta_axis) + + target = 'SignedTheta' + if self._unit == 'MomentumTransfer': 
+ target = 'ElasticQ' + elif self._unit == 'dSpacing': + target = 'ElasticDSpacing' + + ConvertSpectrumAxis(InputWorkspace=joined_ws, OutputWorkspace=joined_ws, Target=target) + Transpose(InputWorkspace=joined_ws, OutputWorkspace=joined_ws) + + if self._rebin_width > 0: + self._group_spectra(joined_ws) + + RenameWorkspace(InputWorkspace=joined_ws, OutputWorkspace=self._out_name) + self.setProperty('OutputWorkspace', self._out_name) + + def _configure(self): + """ + Configures the input properties + """ + self._out_name = self.getPropertyValue('OutputWorkspace') + self._observable = self.getPropertyValue('Observable') + self._sort_x_axis = self.getProperty('SortObservableAxis').value + self._normalise_option = self.getPropertyValue('NormaliseTo') + self._calibration_file = self.getPropertyValue('CalibrationFile') + self._roc_file = self.getPropertyValue('ROCCorrectionFile') + self._unit = self.getPropertyValue('Unit') + self._crop_negative = self.getProperty('CropNegative2Theta').value + self._zero_counting_option = self.getPropertyValue('ZeroCountingCells') + self._rebin_width = self.getProperty('ScanAxisBinWidth').value + if self._normalise_option == 'ROI': + self._region_of_interest = self.getProperty('ROI').value + + def _find_zero_cells(self, ws): + """ + Finds the cells counting zeros + @param ws: the input workspace + """ + self._zero_cells = [] + size = mtd[ws].blocksize() + for spectrum in range(mtd[ws].getNumberHistograms()): + counts = mtd[ws].readY(spectrum) + if np.count_nonzero(counts) < size/5: + self._zero_cells.append(spectrum) + self._zero_cells.sort() + self.log().information('Found zero counting cells at indices: ' + str(self._zero_cells)) + + def _crop_zero_cells(self, ws, wsIndexList): + """ + Crops out the spectra corresponding to zero counting pixels + @param ws: the input workspace + @param wsIndexList: list of workspace indices to crop out + """ + MaskDetectors(Workspace=ws, WorkspaceIndexList=wsIndexList) + ExtractUnmaskedSpectra(InputWorkspace=ws, OutputWorkspace=ws) + + def _interpolate_zero_cells(self, ws, theta_axis): + """ + Interpolates the counts of zero counting cells linearly from the + nearest non-zero neighbour cells + @param ws: the input workspace + @param theta_axis: the unordered signed 2theta axis + """ + unable_to_interpolate = [] + for cell in self._zero_cells: + prev_cell = cell - 1 + next_cell = cell + 1 + + while prev_cell in self._zero_cells: + prev_cell-=1 + while next_cell in self._zero_cells: + next_cell+=1 + + if prev_cell == -1: + self.log().notice('Unable to interpolate for cell #'+str(cell)+ + ': no non-zero neighbour cell was found on the left side. Bin will be cropped.') + unable_to_interpolate.append(cell) + if next_cell == mtd[ws].getNumberHistograms(): + self.log().notice('Unable to interpolate for cell #'+str(cell)+ + ': no non-zero neighbour cell was found on the right side. 
Bin will be cropped.') + unable_to_interpolate.append(cell) + + if prev_cell >= 0 and next_cell < mtd[ws].getNumberHistograms(): + theta_prev = theta_axis[prev_cell] + theta = theta_axis[cell] + theta_next = theta_axis[next_cell] + counts_prev = mtd[ws].readY(prev_cell) + errors_prev = mtd[ws].readE(prev_cell) + counts_next = mtd[ws].readY(next_cell) + errors_next = mtd[ws].readE(next_cell) + coefficient = (theta - theta_prev) / (theta_next - theta_prev) + counts = counts_prev + coefficient * (counts_next - counts_prev) + errors = errors_prev + coefficient * (errors_next - errors_prev) + mtd[ws].setY(cell,counts) + mtd[ws].setE(cell,errors) + + self._crop_zero_cells(ws, unable_to_interpolate) + + def _normalise_to_roi(self, ws): + """ + Normalises counts to the sum of counts in the region-of-interest + @param ws : input workspace with raw spectrum axis + """ + roi_ws = self._hide('roi') + theta_ws = self._hide('theta_ROI') + ConvertSpectrumAxis(InputWorkspace=ws, OutputWorkspace=theta_ws, Target='SignedTheta') + roi_pattern = self._parse_roi(theta_ws) + SumSpectra(InputWorkspace=ws, OutputWorkspace=roi_ws, ListOfWorkspaceIndices=roi_pattern) + SumSpectra(InputWorkspace=roi_ws, OutputWorkspace=roi_ws) + Divide(LHSWorkspace=ws, RHSWorkspace=roi_ws, OutputWorkspace=ws) + DeleteWorkspace(roi_ws) + DeleteWorkspace(theta_ws) + + def _parse_roi(self, ws): + """ + Parses the regions of interest string from 2theta ranges to workspace indices + @param ws : input workspace with 2theta as spectrum axis + @returns: roi as workspace indices, e.g. 7-20,100-123 + """ + result = '' + axis = mtd[ws].getAxis(1).extractValues() + index = 0 + while index < len(self._region_of_interest): + start = self._region_of_interest[index] + end = self._region_of_interest[index+1] + start_index = np.argwhere(axis > start) + end_index = np.argwhere(axis < end) + result += str(start_index[0][0])+'-'+str(end_index[-1][0]) + result += ',' + index += 2 + self.log().information('ROI summing pattern is '+result[:-1]) + return result[:-1] + + def _group_spectra(self, ws): + """ + Groups the spectrum axis by summing spectra + @param ws : the input workspace + """ + new_axis = [] + start_index = 0 + axis = mtd[ws].getAxis(1).extractValues() + grouped = self._hide('grouped') + name = grouped + while start_index < len(axis): + end = axis[start_index] + self._rebin_width + end_index = np.argwhere(axis < end)[-1][0] + SumSpectra(InputWorkspace=ws, OutputWorkspace=name, + StartWorkspaceIndex=int(start_index), EndWorkspaceIndex=int(end_index)) + count = end_index - start_index + 1 + Scale(InputWorkspace=name, OutputWorkspace=name, Factor=1./count) + new_axis.append(np.sum(axis[start_index:end_index + 1]) / count) + if name != grouped: + AppendSpectra(InputWorkspace1=grouped, InputWorkspace2=name, OutputWorkspace=grouped) + DeleteWorkspace(name) + start_index = end_index + 1 + name = self._hide('ws_{0}'.format(start_index)) + spectrum_axis = NumericAxis.create(len(new_axis)) + for i in range(len(new_axis)): + spectrum_axis.setValue(i, new_axis[i]) + mtd[grouped].replaceAxis(1, spectrum_axis) + RenameWorkspace(InputWorkspace=grouped, OutputWorkspace=ws) + +# Register the algorithm with Mantid +AlgorithmFactory.subscribe(PowderDiffILLReduction) diff --git a/Framework/PythonInterface/test/python/mantid/kernel/ArrayOrderedPairsValidatorTest.py b/Framework/PythonInterface/test/python/mantid/kernel/ArrayOrderedPairsValidatorTest.py new file mode 100644 index 0000000000000000000000000000000000000000..04f1d6cd4c5672f0521d01e8ca974b0dc0911245 --- 
/dev/null +++ b/Framework/PythonInterface/test/python/mantid/kernel/ArrayOrderedPairsValidatorTest.py @@ -0,0 +1,54 @@ +from __future__ import (absolute_import, division, print_function) + +import unittest +import testhelpers + +from mantid.kernel import IntArrayOrderedPairsValidator, FloatArrayOrderedPairsValidator, \ + IntArrayProperty, FloatArrayProperty +from mantid.api import PythonAlgorithm + + +class ArrayOrderedPairsValidatorTest(unittest.TestCase): + + def test_fail_odd_entries(self): + alg = self._create_alg() + int_vals = [5,7,13] + float_vals = [2.1] + self.assertRaises(ValueError, alg.setProperty, "IntInput", int_vals) + self.assertRaises(ValueError, alg.setProperty, "FloatInput", float_vals) + + def test_fail_unordered_pairs(self): + alg = self._create_alg() + int_vals = [5, 18, 4, 2] + float_vals = [2.1, 5.7, 4.3, 1.5] + self.assertRaises(ValueError, alg.setProperty, "IntInput", int_vals) + self.assertRaises(ValueError, alg.setProperty, "FloatInput", float_vals) + + def test_pass_ordered_pairs(self): + alg = self._create_alg() + int_vals = [5, 18, 4, 9] + float_vals = [2.1, 5.7, 4.3, 6.7] + testhelpers.assertRaisesNothing(self, alg.setProperty, "IntInput", int_vals) + testhelpers.assertRaisesNothing(self, alg.setProperty, "FloatInput", float_vals) + + def _create_alg(self): + """ + Creates a test algorithm with a ordered pairs validator + """ + class TestAlgorithm(PythonAlgorithm): + + def PyInit(self): + int_validator = IntArrayOrderedPairsValidator() + self.declareProperty(IntArrayProperty("IntInput", int_validator)) + float_validator = FloatArrayOrderedPairsValidator() + self.declareProperty(FloatArrayProperty("FloatInput", float_validator)) + + def PyExec(self): + pass + + alg = TestAlgorithm() + alg.initialize() + return alg + +if __name__ == '__main__': + unittest.main() diff --git a/Framework/PythonInterface/test/python/mantid/kernel/CMakeLists.txt b/Framework/PythonInterface/test/python/mantid/kernel/CMakeLists.txt index acbd8f5ad96ca08de1ba6b980a860585e5cea9dd..4a47fd27834ede72e9266684560673458cdb79ec 100644 --- a/Framework/PythonInterface/test/python/mantid/kernel/CMakeLists.txt +++ b/Framework/PythonInterface/test/python/mantid/kernel/CMakeLists.txt @@ -5,6 +5,7 @@ set ( TEST_PY_FILES ArrayPropertyTest.py ArrayBoundedValidatorTest.py ArrayLengthValidatorTest.py + ArrayOrderedPairsValidatorTest.py BoundedValidatorTest.py CompositeValidatorTest.py ConfigServiceTest.py diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/CMakeLists.txt b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/CMakeLists.txt index 6dba3a3729f4c6e0a70bd3338b25a950506b6f8b..48ef0b2f86a08f013959d37df12dad55434a5abd 100644 --- a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/CMakeLists.txt +++ b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/CMakeLists.txt @@ -39,6 +39,7 @@ set ( TEST_PY_FILES MolDynTest.py MSDFitTest.py OSIRISDiffractionReductionTest.py + PowderDiffILLReductionTest.py ResNorm2Test.py SANSDarkRunBackgroundCorrectionTest.py SANSFitShiftScaleTest.py diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/PowderDiffILLReductionTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/PowderDiffILLReductionTest.py new file mode 100644 index 0000000000000000000000000000000000000000..1b43c454a6d94cb4b812e738092e7a041dffeed9 --- /dev/null +++ 
b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/PowderDiffILLReductionTest.py @@ -0,0 +1,92 @@ +from __future__ import (absolute_import, division, print_function) + +import unittest +from mantid.simpleapi import PowderDiffILLReduction +from mantid import config, mtd + + +class PowderDiffILLReductionTest(unittest.TestCase): + + _runs = '967087:967088' + + def setUp(self): + config['default.facility'] = 'ILL' + config['default.instrument'] = 'D20' + config.appendDataSearchSubDir('ILL/D20/') + + def tearDown(self): + mtd.remove('red') + + def test_default_options(self): + red = PowderDiffILLReduction(Run=self._runs) + self.assertTrue(red) + self.assertTrue(not red.isDistribution()) + self.assertTrue(not red.isHistogramData()) + self.assertEquals(red.getNumberHistograms(),2) + self.assertEquals(red.blocksize(),3008) + xaxis = red.getAxis(0).extractValues() + xunit = red.getAxis(0).getUnit().unitID() + self.assertEquals(xunit,'Degrees') + self.assertAlmostEqual(xaxis[0],0.4034,4) + self.assertAlmostEqual(xaxis[-1],150.7534,4) + spectrumaxis = red.getAxis(1).extractValues() + self.assertAlmostEqual(spectrumaxis[0],253.924,5) + self.assertAlmostEqual(spectrumaxis[1],242.82001,5) + self.assertEquals(red.readY(0)[0],644) + self.assertAlmostEqual(red.readE(0)[0],25.3772,4) + self.assertEquals(red.readY(0)[3007], 8468) + self.assertAlmostEqual(red.readE(0)[3007],92.0217,4) + self.assertEquals(red.readY(1)[1],1105) + self.assertAlmostEqual(red.readE(1)[1],33.2415,4) + self.assertEquals(red.readY(0)[1400],9532) + self.assertEquals(red.readY(1)[2100],9789) + + def test_sort_temperature_axis(self): + red = PowderDiffILLReduction(Run=self._runs,SortObservableAxis=True) + self.assertTrue(red) + spectrumaxis = red.getAxis(1).extractValues() + self.assertAlmostEqual(spectrumaxis[0],242.82001,5) + self.assertAlmostEqual(spectrumaxis[1],253.924,5) + + def test_momentum_transfer(self): + red = PowderDiffILLReduction(Run=self._runs,Unit='MomentumTransfer') + self.assertTrue(red) + xunit = red.getAxis(0).getUnit().unitID() + self.assertEquals(xunit,'MomentumTransfer') + + def test_dspacing(self): + red = PowderDiffILLReduction(Run=self._runs,Unit='dSpacing') + self.assertTrue(red) + xunit = red.getAxis(0).getUnit().unitID() + self.assertEquals(xunit,'dSpacing') + + def test_normalise_monitor(self): + red = PowderDiffILLReduction(Run=self._runs,NormaliseTo='Monitor') + self.assertTrue(red) + self.assertAlmostEquals(red.readY(0)[1400],0.00348,5) + self.assertAlmostEquals(red.readY(1)[2100],0.00335,5) + + def test_normalise_time(self): + red = PowderDiffILLReduction(Run=self._runs,NormaliseTo='Time') + self.assertTrue(red) + self.assertAlmostEquals(red.readY(0)[1400],9532/300,4) + self.assertAlmostEquals(red.readY(1)[2100],9789/300,2) + + def test_normalise_roi(self): + red = PowderDiffILLReduction(Run=self._runs,NormaliseTo='ROI',ROI='0,100') + self.assertTrue(red) + self.assertAlmostEquals(red.readY(0)[1400],0.00055,5) + self.assertAlmostEquals(red.readY(1)[2100],0.00053,5) + + def test_crop_zero_counting_cells(self): + red = PowderDiffILLReduction(Run=self._runs,ZeroCountingCells='Crop') + self.assertTrue(red) + self.assertEquals(red.blocksize(), 3002) + + def test_rebin(self): + red = PowderDiffILLReduction(Run=self._runs,ScanAxisBinWidth=12,SortObservableAxis=True) + self.assertEquals(red.getNumberHistograms(), 1) + self.assertAlmostEqual(red.getAxis(1).extractValues()[0], 248.372, 5) + +if __name__ == '__main__': + unittest.main() diff --git 
a/Testing/Data/DocTest/ILL/D20/967076.nxs.md5 b/Testing/Data/DocTest/ILL/D20/967076.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..53e75259833f01795528ca06fab0168e3c505742 --- /dev/null +++ b/Testing/Data/DocTest/ILL/D20/967076.nxs.md5 @@ -0,0 +1 @@ +30c111f0b00d4261e8fd60ef533577ef diff --git a/Testing/Data/DocTest/ILL/D20/967087.nxs.md5 b/Testing/Data/DocTest/ILL/D20/967087.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..3cfc7558cecc98943bfc010b355cd4038960abd5 --- /dev/null +++ b/Testing/Data/DocTest/ILL/D20/967087.nxs.md5 @@ -0,0 +1 @@ +db09256c0525c0e682a9f2657fcd19da diff --git a/Testing/Data/DocTest/ILL/D20/967088.nxs.md5 b/Testing/Data/DocTest/ILL/D20/967088.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..fa0aab72dcfd541408a48ad4d3f33d86929978e5 --- /dev/null +++ b/Testing/Data/DocTest/ILL/D20/967088.nxs.md5 @@ -0,0 +1 @@ +9ef1d7a85fd27dd2873e3a6141c8dd94 diff --git a/Testing/Data/SystemTest/ILL/D20/967087.nxs.md5 b/Testing/Data/SystemTest/ILL/D20/967087.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..3cfc7558cecc98943bfc010b355cd4038960abd5 --- /dev/null +++ b/Testing/Data/SystemTest/ILL/D20/967087.nxs.md5 @@ -0,0 +1 @@ +db09256c0525c0e682a9f2657fcd19da diff --git a/Testing/Data/SystemTest/ILL/D20/967088.nxs.md5 b/Testing/Data/SystemTest/ILL/D20/967088.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..fa0aab72dcfd541408a48ad4d3f33d86929978e5 --- /dev/null +++ b/Testing/Data/SystemTest/ILL/D20/967088.nxs.md5 @@ -0,0 +1 @@ +9ef1d7a85fd27dd2873e3a6141c8dd94 diff --git a/Testing/Data/UnitTest/ILL/D20/967076.nxs.md5 b/Testing/Data/UnitTest/ILL/D20/967076.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..53e75259833f01795528ca06fab0168e3c505742 --- /dev/null +++ b/Testing/Data/UnitTest/ILL/D20/967076.nxs.md5 @@ -0,0 +1 @@ +30c111f0b00d4261e8fd60ef533577ef diff --git a/Testing/Data/UnitTest/ILL/D20/967087.nxs.md5 b/Testing/Data/UnitTest/ILL/D20/967087.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..3cfc7558cecc98943bfc010b355cd4038960abd5 --- /dev/null +++ b/Testing/Data/UnitTest/ILL/D20/967087.nxs.md5 @@ -0,0 +1 @@ +db09256c0525c0e682a9f2657fcd19da diff --git a/Testing/Data/UnitTest/ILL/D20/967088.nxs.md5 b/Testing/Data/UnitTest/ILL/D20/967088.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..fa0aab72dcfd541408a48ad4d3f33d86929978e5 --- /dev/null +++ b/Testing/Data/UnitTest/ILL/D20/967088.nxs.md5 @@ -0,0 +1 @@ +9ef1d7a85fd27dd2873e3a6141c8dd94 diff --git a/Testing/SystemTests/tests/analysis/ILLPowderDiffDetEffCorrTest.py b/Testing/SystemTests/tests/analysis/ILLPowderDiffDetEffCorrTest.py new file mode 100644 index 0000000000000000000000000000000000000000..57b7c11011be8863af628a936b1583831629dddb --- /dev/null +++ b/Testing/SystemTests/tests/analysis/ILLPowderDiffDetEffCorrTest.py @@ -0,0 +1,33 @@ +from __future__ import (absolute_import, division, print_function) + +import stresstesting +from mantid.simpleapi import PowderDiffILLDetEffCorr, GroupWorkspaces +from mantid import config, mtd + + +class ILLPowderDiffDetEffCorrTest(stresstesting.MantidStressTest): + + def __init__(self): + super(ILLPowderDiffDetEffCorrTest, self).__init__() + self.setUp() + + def setUp(self): + config['default.facility'] = 'ILL' + config['default.instrument'] = 'D20' + config.appendDataSearchSubDir('ILL/D20/') + + def requiredFiles(self): + return ['967076.nxs'] + + def tearDown(self): 
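+        # Clear all workspaces created by the test from the analysis data service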
+ mtd.clear() + + def runTest(self): + PowderDiffILLDetEffCorr(CalibrationRun='967076.nxs', + OutputWorkspace='calib', + OutputResponseWorkspace='response') + GroupWorkspaces(InputWorkspaces=['calib','response'], OutputWorkspace='group') + + def validate(self): + self.tolerance = 0.0001 + return ['group', 'ILL_D20_calib_def.nxs'] diff --git a/Testing/SystemTests/tests/analysis/ILLPowderDiffReductionTest.py b/Testing/SystemTests/tests/analysis/ILLPowderDiffReductionTest.py new file mode 100644 index 0000000000000000000000000000000000000000..6900da2b466634b311182da3f05eb9254e3c9e27 --- /dev/null +++ b/Testing/SystemTests/tests/analysis/ILLPowderDiffReductionTest.py @@ -0,0 +1,32 @@ +from __future__ import (absolute_import, division, print_function) + +import stresstesting +from mantid.simpleapi import PowderDiffILLReduction +from mantid import config, mtd + + +class ILLPowderDiffReductionTest(stresstesting.MantidStressTest): + + def __init__(self): + super(ILLPowderDiffReductionTest, self).__init__() + self.setUp() + + def setUp(self): + config['default.facility'] = 'ILL' + config['default.instrument'] = 'D20' + config.appendDataSearchSubDir('ILL/D20/') + + def requiredFiles(self): + return ['967087.nxs', '967088.nxs'] + + def tearDown(self): + mtd.clear() + + def runTest(self): + PowderDiffILLReduction(Run='967087,967088',OutputWorkspace='reduced') + + def validate(self): + self.tolerance = 0.0001 + # something goes wrong with DetInfo when saving loading nexus processed + self.disableChecking.append("Instrument") + return ['reduced', 'ILL_D20_red_def.nxs'] diff --git a/Testing/SystemTests/tests/analysis/LoadILLDiffractionTest.py b/Testing/SystemTests/tests/analysis/ILLPowderLoadDetectorScanTest.py similarity index 50% rename from Testing/SystemTests/tests/analysis/LoadILLDiffractionTest.py rename to Testing/SystemTests/tests/analysis/ILLPowderLoadDetectorScanTest.py index 1d2130bd75d8798e768dfd1dc6362da5a9729907..efe33af322e9dfd1ba73abd4f9339545caab08bb 100644 --- a/Testing/SystemTests/tests/analysis/LoadILLDiffractionTest.py +++ b/Testing/SystemTests/tests/analysis/ILLPowderLoadDetectorScanTest.py @@ -4,10 +4,12 @@ from mantid.simpleapi import LoadILLDiffraction from mantid import config -class LoadILLDiffractionTest(stresstesting.MantidStressTest): +# TODO: Once the nexus saver for a scanned workspace is implemented, +# replace the assertions with compare workspaces with the reference +class ILLPowderLoadDetectorScanTest(stresstesting.MantidStressTest): def __init__(self): - super(LoadILLDiffractionTest, self).__init__() + super(ILLPowderLoadDetectorScanTest, self).__init__() self.setUp() def requiredFiles(self): @@ -15,20 +17,11 @@ class LoadILLDiffractionTest(stresstesting.MantidStressTest): return ["967076.nxs"] def setUp(self): - # cache default instrument and datadirs - self.facility = config['default.facility'] - self.instrument = config['default.instrument'] - self.datadirs = config['datasearch.directories'] config['default.facility'] = 'ILL' config['default.instrument'] = 'D20' config.appendDataSearchSubDir('ILL/D20/') - def tearDown(self): - config['default.facility'] = self.facility - config['default.instrument'] = self.instrument - config['datasearch.directories'] = self.datadirs - def d20_detector_scan_test(self): # tests the loading for D20 calibration run (detector scan) ws = LoadILLDiffraction('967076.nxs') @@ -36,11 +29,17 @@ class LoadILLDiffractionTest(stresstesting.MantidStressTest): self.assertEquals(ws.getNumberHistograms(), (2 * 1536 + 1) * 571) 
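+        # The detector scan yields one spectrum per pixel per time index:
+        # (2 * 1536 detector pixels + 1 monitor, as extracted later in the reduction)
+        # spectra at each of the 571 scan points, which is the layout the index
+        # arithmetic in the assertions below relies on.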
self.assertEquals(ws.readY(0)[0], 523944) self.assertDelta(ws.readE(0)[0], 723.8397, 0.0001) - self.assertEquals(ws.readY(483879)[0], 6541) - self.assertDelta(ws.readE(483879)[0], 80.8764, 0.0001) + self.assertEquals(ws.readY(570)[0], 523819) + self.assertDelta(ws.readE(570)[0], 723.7534, 0.0001) + self.assertEquals(ws.readY(571)[0], 0) + self.assertEquals(ws.readE(571)[0], 0) + self.assertEquals(ws.readY(37114)[0], 0) + self.assertEquals(ws.readE(37114)[0], 0) + self.assertEquals(ws.readY(37115)[0], 6111) + self.assertDelta(ws.readE(37115)[0], 78.1728, 0.0001) + self.assertEquals(ws.readY(1754682)[0], 4087) + self.assertDelta(ws.readE(1754682)[0], 63.9296, 0.0001) def runTest(self): self.d20_detector_scan_test() - - self.tearDown() diff --git a/Testing/SystemTests/tests/analysis/reference/ILL_D20_calib_def.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/ILL_D20_calib_def.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..e60cf87cf13456d60f870fb8245bf9ed6a9c1dc2 --- /dev/null +++ b/Testing/SystemTests/tests/analysis/reference/ILL_D20_calib_def.nxs.md5 @@ -0,0 +1 @@ +d496944fad101f132e4cd6b935e31327 diff --git a/Testing/SystemTests/tests/analysis/reference/ILL_D20_red_def.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/ILL_D20_red_def.nxs.md5 new file mode 100644 index 0000000000000000000000000000000000000000..c2755a43781762c9db4007c90c5de52b36b5a754 --- /dev/null +++ b/Testing/SystemTests/tests/analysis/reference/ILL_D20_red_def.nxs.md5 @@ -0,0 +1 @@ +50015278f63e66763580a15aa5354cd3 diff --git a/docs/source/algorithms/LoadILLDiffraction-v1.rst b/docs/source/algorithms/LoadILLDiffraction-v1.rst index 43a342602cb1c231c6b73956a6ba4ee904c398b8..95c3221834d56955edf989dd43e1b4144201c138 100644 --- a/docs/source/algorithms/LoadILLDiffraction-v1.rst +++ b/docs/source/algorithms/LoadILLDiffraction-v1.rst @@ -24,8 +24,8 @@ For D20 1-dimensional detector, it supports 3 resolution modes: - **high** is when each pixel is split by 3, resulting in 4608 pixels. This corresponds to IDF *D20_hr*. -Note, that all the IDFs contain only active pixels, and do not count the first and last banks which are inactive. -These are the first and last 32 pixels in low resolution mode. +Note, that all the IDFs contain only active pixels, and do not count the last 2 banks which are permanently inactive. + The *2theta* value of the first pixel is read from the file, and the whole detector is rotated correspondingly. Scans @@ -33,11 +33,11 @@ Scans The loader is able to load the following scan configurations: -- **no scan**, used for D20, when a single file contains a single dataset, e.g. data acquired at one temperature point. In this case x-axis is just a single point. +- **no scan**, used for D20, when a single file contains a single dataset, e.g. data acquired with static detector at a single temperature point. In this case x-axis is just a single point. - **detector scan**, used always for D2B, and for D20 calibration runs, when the detector moves during the run. In this configuration the output is a *scanning workspace* containing one spectrum for each pixel at each time index. The x-axis is again a single point. -- **other scan**, e.g. omega scan for D20, which is another type of motor scan, but the detector does not move. In this case, the data in the raw file is organised just as for *detector scan*, but the output workspace is not a *scanning workspace*. It is a regular workspace with x-axis corresponding to the scanned variable, e.g. omega angle. +- **other scan**, e.g. 
omega scan for D20, which is another type of motor scan, where it is the sample that moves rather than the detector. In this case, the data in the raw file is organised just as for a *detector scan*, but the output workspace is not a *scanning workspace*. It is a regular workspace with the x-axis corresponding to the scanned variable, e.g. the omega angle. Logs #### @@ -93,4 +93,3 @@ Output: .. categories:: .. sourcelink:: - diff --git a/docs/source/algorithms/MostLikelyMean-v1.rst b/docs/source/algorithms/MostLikelyMean-v1.rst new file mode 100644 index 0000000000000000000000000000000000000000..0573e797c957c37533b9555ab8fb8af7ea40dc6f --- /dev/null +++ b/docs/source/algorithms/MostLikelyMean-v1.rst @@ -0,0 +1,37 @@
+
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm finds the so-called most-likely mean of an array, defined as the element that minimises the sum of the square-rooted absolute distances to all the other elements of the array:
+
+.. math:: MostLikelyMean(a) = a\left[\arg\min_{j}\sum_{i} \sqrt{|a_{i} - a_{j}|}\right]
+
+Usage
+-----
+
+**Example - MostLikelyMean**
+
+.. testcode:: MostLikelyMeanExample
+
+    import numpy
+    array = numpy.arange(100)
+    mlm = MostLikelyMean(array)
+    print(mlm)
+
+Output:
+
+.. testoutput:: MostLikelyMeanExample
+
+    49.0
+
+.. categories::
+
+.. sourcelink:: diff --git a/docs/source/algorithms/PowderDiffILLDetEffCorr-v1.rst b/docs/source/algorithms/PowderDiffILLDetEffCorr-v1.rst new file mode 100644 index 0000000000000000000000000000000000000000..972a9b365e3083ef18a9dee3164b213c25db5e46 --- /dev/null +++ b/docs/source/algorithms/PowderDiffILLDetEffCorr-v1.rst @@ -0,0 +1,93 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm calculates the detector efficiency corrections for the D20 instrument at the ILL.
+
+It proceeds as follows:
+
+1. Takes the response of the first cell as a function of scattering angle as the reference.
+
+2. Takes the second cell and divides the reference by the second cell's response in the region where the two overlap in scattering angle. This results in an array of relative response ratios.
+
+3. Uses one of the 3 available options to compute the relative calibration factor from the array of relative response ratios. This factor is saved in the output as the calibration factor for the second cell.
+
+4. The response of the second cell is scaled up by that factor, and then merged (in the overlapping region) with the reference using :ref:`WeightedMean <algm-WeightedMean>`.
+
+5. Repeats from Step 2 for the next cell, and so on until the last cell.
+
+For zero-counting cells the calibration factor cannot be computed, so it is set to 1. Cells are treated as zero-counting if they count zero more than 80% of the time.
+
+After the calibration factors are computed for all the cells, they are divided by the median of all the factors (excluding the zero-counting cells),
+in order to absolutely normalise the calibration curve.
+
+Input
+-----
+
+The input must be a single **detector-scan** run in `.nxs` format produced for vanadium.
+
+Optionally, a previously derived calibration file can be supplied as a seed; the algorithm will then compute the residual calibration factors on top of it.
+
+Method
+------
+
+You can choose how the relative calibration factor is extracted from the relative response ratios (Step 3):
+it can be the Median (default), the Mean or :ref:`MostLikelyMean <algm-MostLikelyMean>`.
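+
+As an illustration of the three options, a plain-NumPy sketch (not the algorithm's actual implementation) reducing a made-up array of relative
+response ratios to a single factor; the most-likely-mean line simply mirrors the formula quoted in the
+:ref:`MostLikelyMean <algm-MostLikelyMean>` documentation:
+
+.. code-block:: python
+
+    import numpy as np
+
+    # Hypothetical relative response ratios for one cell (made-up numbers)
+    ratios = np.array([0.97, 1.02, 1.01, 0.99, 1.35, 1.00])
+
+    mean_factor = ratios.mean()
+    median_factor = np.median(ratios)
+
+    # Most-likely mean: the element j minimising sum_i sqrt(|a_i - a_j|)
+    distances = np.sqrt(np.abs(ratios[:, None] - ratios[None, :])).sum(axis=0)
+    most_likely_factor = ratios[np.argmin(distances)]
+
+    print(mean_factor, median_factor, most_likely_factor)
+
+Note how the median and the most-likely mean are much less affected by the outlying ratio than the plain mean.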
+
+Excluded range
+--------------
+
+Provide ranges in scattering angle (in degrees) to exclude unwanted regions, e.g. the beam stop.
+Multiple regions can be set; for example, **-20,0,10,20** will exclude **[-20,0]** and **[10,20]**.
+This exclusion is applied at Step 3.
+
+Pixel range
+-----------
+
+Provide the range of detector cells for which to compute the calibration factors.
+For the remaining cells, the factor will be set to 1.
+
+Output
+------
+
+The output is a single-column workspace containing the calibration factor for each cell. This should normally be saved with
+:ref:`SaveNexusProcessed <algm-SaveNexusProcessed>` for later use in :ref:`PowderDiffILLReduction <algm-PowderDiffILLReduction>`.
+
+Optionally, the full absolute response resulting from the combination of the data for all the cells (see Step 4 above) can also be output.
+
+Workflow
+--------
+
+.. diagram:: PowderDiffILLDetEffCorr-v1_wkflw.dot
+
+Related Algorithms
+------------------
+
+:ref:`PowderDiffILLReduction <algm-PowderDiffILLReduction>` performs the data reduction.
+
+Usage
+-----
+
+**Example - PowderDiffILLDetEffCorr**
+
+.. code-block:: python
+
+    calib = PowderDiffILLDetEffCorr(CalibrationRun='967076', OutputWorkspace='constants')
+    print("Calibration workspace contains {0} constants, one for each cell.".format(calib.getNumberHistograms()))
+
+Output:
+
+.. code-block:: python
+
+    Calibration workspace contains 3072 constants, one for each cell.
+
+.. categories::
+
+.. sourcelink:: diff --git a/docs/source/algorithms/PowderDiffILLReduction-v1.rst b/docs/source/algorithms/PowderDiffILLReduction-v1.rst new file mode 100644 index 0000000000000000000000000000000000000000..cf30acd6246065db812cd874ee491949aa30fdef --- /dev/null +++ b/docs/source/algorithms/PowderDiffILLReduction-v1.rst @@ -0,0 +1,92 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm performs the data reduction for the D20 instrument at the ILL.
+
+Input runs
+----------
+
+Provide the list of input runs (e.g. one `.nxs` file corresponding to a single temperature point) following the syntax of
+:py:obj:`MultipleFileProperty <mantid.api.MultipleFileProperty>`.
+
+Summing of individual runs is possible, and if requested it will be carried out by :ref:`MergeRuns <algm-MergeRuns>`, taking into account the
+metadata.
+
+The list of unsummed files will eventually be combined with :ref:`ConjoinXRuns <algm-ConjoinXRuns>`.
+
+Scanning observable
+-------------------
+
+The scanning observable can be any numeric sample log entry (scalar or time series).
+It is possible to request ordering and/or equidistant rebinning of the observable axis for the final result.
+
+Calibration file
+----------------
+
+This has to be a processed `.nxs` file containing a single-column workspace of calibration constants generated by
+:ref:`PowderDiffILLDetEffCorr <algm-PowderDiffILLDetEffCorr>` and then saved with :ref:`SaveNexusProcessed <algm-SaveNexusProcessed>`.
+
+Normalisation options
+---------------------
+
+Choose one of the 4 available options. If region-of-interest (ROI) normalisation is requested, provide the ROI as a list of ranges in scattering angle (in degrees);
+for example, **10,20,50,100** means **[10-20]** and **[50-100]**.
+
+Output
+------
+
+The output of the algorithm is a single workspace with one spectrum (diffractogram) per observable (e.g. temperature) point.
+This algorithm does not save the output to a file itself.
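+
+As a quick illustration of the ROI normalisation syntax above, a minimal sketch (the run numbers come from the tests in this change set and the ROI is the example range quoted above, not a recommended setting):
+
+.. code-block:: python
+
+    # Reduce two D20 runs, normalising to a region of interest in scattering angle
+    red = PowderDiffILLReduction(Run='967087:967088',
+                                 NormaliseTo='ROI',
+                                 ROI='10,20,50,100')
+    # One diffractogram per observable (e.g. temperature) point
+    print(red.getNumberHistograms(), red.blocksize())
+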
+Use :ref:`SaveFocusedXYE <algm-SaveFocusedXYE>` to save in FullProf format #10, or :ref:`SaveGSS <algm-SaveGSS>` for GSAS format.
+
+Workflow
+--------
+
+.. diagram:: PowderDiffILLReduction-v1_wkflw.dot
+
+Related Algorithms
+------------------
+
+:ref:`PowderDiffILLDetEffCorr <algm-PowderDiffILLDetEffCorr>` reads a detector-scan run and calculates the detector efficiency constants.
+
+Usage
+-----
+
+**Example - PowderDiffILLReduction**
+
+.. testsetup:: ExPowderDiffILLReduction
+
+    config['default.facility'] = 'ILL'
+    config['default.instrument'] = 'D20'
+    config.appendDataSearchSubDir('ILL/D20/')
+
+.. testcode:: ExPowderDiffILLReduction
+
+    red_ws = PowderDiffILLReduction(Run='967087,967088')
+    print("Reduced workspace has {0} diffractograms having {1} bins each".format(red_ws.getNumberHistograms(),red_ws.blocksize()))
+    print("The first one corresponds to T={0:.2f} K".format(red_ws.getAxis(1).extractValues()[0]))
+    print("The second one corresponds to T={0:.2f} K".format(red_ws.getAxis(1).extractValues()[1]))
+
+Output:
+
+.. testoutput:: ExPowderDiffILLReduction
+
+    Reduced workspace has 2 diffractograms having 3008 bins each
+    The first one corresponds to T=253.92 K
+    The second one corresponds to T=242.82 K
+
+.. testcleanup:: ExPowderDiffILLReduction
+
+    mtd.remove('red_ws')
+
+.. categories::
+
+.. sourcelink:: diff --git a/docs/source/diagrams/PowderDiffILLDetEffCorr-v1_wkflw.dot b/docs/source/diagrams/PowderDiffILLDetEffCorr-v1_wkflw.dot new file mode 100644 index 0000000000000000000000000000000000000000..10ceee7c46f2015604deaa7414002a0b3c72b599 --- /dev/null +++ b/docs/source/diagrams/PowderDiffILLDetEffCorr-v1_wkflw.dot @@ -0,0 +1,91 @@ +digraph PowderDiffILLDetEffCorr { + label="PowderDiffILLDetEffCorr flowchart" + $global_style + + subgraph values { + $value_style + reference + factor + single_cell_response + single_cell_responseAfter [label="single_cell_response"] + factors + } + + subgraph decision { + $decision_style + CalibrationMethod + CalibrationProvided + NormaliseTo + InterpolateOverlappingAngles + } + + subgraph params { + $param_style + CalibrationRun + CalibrationFile + ExcludedRange + OutputWorkspace + OutputResponseWorkspace + PixelRange + } + + subgraph algorithms { + $algorithm_style + LoadILLDiffraction + MostLikelyMean + WeightedMean + ConvertSpectrumAxis + Divide + Scale + SplineInterpolation + } + + subgraph process { + $process_style + normalise + calibrate + absolute_normalise + exclude_ranges + extract_single_cell + mean + median + } + + CalibrationRun -> LoadILLDiffraction [label="DetectorScan"] + LoadILLDiffraction -> CalibrationProvided + CalibrationProvided -> calibrate [label="Yes"] + CalibrationFile -> calibrate + calibrate -> normalise + CalibrationProvided -> normalise [label="No"] + NormaliseTo -> normalise + normalise -> ConvertSpectrumAxis + ConvertSpectrumAxis -> extract_single_cell [label="i-th"] + PixelRange -> extract_single_cell + extract_single_cell -> single_cell_response + single_cell_response -> reference [label="For the first pixel"] + reference -> Divide [label="LHS"] + single_cell_response -> InterpolateOverlappingAngles + InterpolateOverlappingAngles -> SplineInterpolation [label="yes"] + single_cell_response -> SplineInterpolation [label="WorkspaceToInterpolate"] + InterpolateOverlappingAngles -> Divide [label="No"] + reference -> SplineInterpolation [label="WorkspaceToMatch"] + SplineInterpolation -> Divide [label="RHS"] + Divide -> exclude_ranges + ExcludedRange -> exclude_ranges + exclude_ranges -> CalibrationMethod + 
CalibrationMethod -> MostLikelyMean [label="MostLikelyMean"] + CalibrationMethod -> mean [label="Mean"] + CalibrationMethod -> median [label="Median"] + MostLikelyMean -> factor + mean -> factor + median -> factor + Scale -> single_cell_responseAfter + factor -> single_cell_responseAfter + single_cell_responseAfter -> WeightedMean + reference -> WeightedMean + WeightedMean -> reference + factor -> factors + factors -> absolute_normalise + absolute_normalise -> OutputWorkspace + reference -> OutputResponseWorkspace +} diff --git a/docs/source/diagrams/PowderDiffILLReduction-v1_wkflw.dot b/docs/source/diagrams/PowderDiffILLReduction-v1_wkflw.dot new file mode 100644 index 0000000000000000000000000000000000000000..f74fa4d8d5604dea222f8be0ed45b106f2f0e9cc --- /dev/null +++ b/docs/source/diagrams/PowderDiffILLReduction-v1_wkflw.dot @@ -0,0 +1,83 @@ +digraph PowderDiffILLReduction { + label="PowderDiffILLReduction flowchart" + $global_style + + subgraph values { + $value_style + } + + subgraph decision { + $decision_style + CalibrationProvided + NormaliseTo + SortObservableAxis + ROCCorrectionProvided + } + + subgraph params { + $param_style + Run + ROI + Observable + CalibrationFile + OutputWorkspace + ScanAxisBinWidth + ROCCorrectionFile + Unit + } + + subgraph algorithms { + $algorithm_style + LoadILLDiffraction + ConjoinXRuns + SortXAxis + ConvertSpectrumAxis + Transpose + Calibrate [label="Multiply"] + ROCCorrect [label="Multiply"] + NormaliseToMonitor [label="Divide"] + NormaliseToTime [label="Divide"] + NormaliseToROI [label="Divide"] + LoadCalibration [label="LoadNexusProcessed"] + LoadROC [label="LoadNexusProcessed"] + } + + subgraph processes { + $process_style + crop_negative_2theta + treat_zero_counting_cells + group_spectra + } + + Run -> LoadILLDiffraction + LoadILLDiffraction -> ConjoinXRuns + Observable -> ConjoinXRuns + ConjoinXRuns -> NormaliseTo + NormaliseTo -> NormaliseToMonitor [label="Monitor"] + NormaliseTo -> NormaliseToROI [label="ROI"] + ROI -> NormaliseToROI + NormaliseTo -> NormaliseToTime [label="Time"] + NormaliseToTime -> CalibrationProvided + NormaliseToROI -> CalibrationProvided + NormaliseToMonitor -> CalibrationProvided + CalibrationProvided -> Calibrate [label="Yes"] + CalibrationFile -> LoadCalibration + LoadCalibration -> Calibrate + CalibrationProvided -> ROCCorrectionProvided [label="No"] + Calibrate -> ROCCorrectionProvided + ROCCorrectionProvided -> ROCCorrect [label="Yes"] + ROCCorrectionFile -> LoadROC + LoadROC -> ROCCorrect + ROCCorrect -> SortObservableAxis + ROCCorrectionProvided -> SortObservableAxis [label="No"] + SortObservableAxis -> SortXAxis [label="Yes"] + SortXAxis -> crop_negative_2theta + SortObservableAxis -> crop_negative_2theta + crop_negative_2theta -> treat_zero_counting_cells + treat_zero_counting_cells -> ConvertSpectrumAxis + Unit -> ConvertSpectrumAxis + ConvertSpectrumAxis -> Transpose + ScanAxisBinWidth -> group_spectra + Transpose -> group_spectra + group_spectra -> OutputWorkspace +} diff --git a/docs/source/release/v3.12.0/diffraction.rst b/docs/source/release/v3.12.0/diffraction.rst index d818f3b1f7f5cc599816cbb0b965b7048fca5318..816a271142c00fcdfbb874c2f49b3f030b1c4cd8 100644 --- a/docs/source/release/v3.12.0/diffraction.rst +++ b/docs/source/release/v3.12.0/diffraction.rst @@ -21,6 +21,7 @@ Powder Diffraction - New algorithm :ref:`algm-EstimateDivergence` estimates the beam divergence due to finite slit size - :ref:`PDCalibration <algm-PDCalibration>` returns three more diagnostic workspaces: one for the fitted peak 
heights, one for the fitted peak widths, and one for observed resolution.
- :ref:`LoadILLDiffraction <algm-LoadILLDiffraction>` now supports D2B files with calibrated data.
+- :ref:`PowderDiffILLReduction <algm-PowderDiffILLReduction>` and :ref:`PowderDiffILLDetEffCorr <algm-PowderDiffILLDetEffCorr>` enable the basic data reduction for the D20 scanning powder diffractometer at the ILL.
- New algorithm :ref:`algm-SumOverlappingTubes` combines a detector scan for D2B into a single workspace.
- ISIS Powder scripts for HRPD now support extra TOF windows 10-50 and 180-280
- After calling create_vanadium and focus in ISIS Powder scripts on POLARIS, the output workspaces always contain the sample material if it is set using set_sample_material. (To view the sample material, right click the workspace and click 'Sample Material...') diff --git a/docs/source/release/v3.12.0/framework.rst b/docs/source/release/v3.12.0/framework.rst index 4c87dd1226c07f0ca1f295b5c9a994688e07ef60..d33d612b0d4493c8ef5741f8fba64bfe8b1fa184 100644 --- a/docs/source/release/v3.12.0/framework.rst +++ b/docs/source/release/v3.12.0/framework.rst @@ -28,6 +28,7 @@ Algorithms
- :ref:`ConjoinWorkspaces <algm-ConjoinWorkspaces>` now supports non-constant bins.
- :ref:`Fit <algm-Fit>` will now respect excluded ranges when ``CostFunction = 'Unweighted least squares'``.
- :ref:`NormaliseToMonitor <algm-NormaliseToMonitor>` now supports non-constant number of bins.
+- :ref:`MostLikelyMean <algm-MostLikelyMean>` is a new algorithm that computes the most-likely mean of an array: the element with the smallest summed distance from the rest of the elements.
- :ref:`LoadAndMerge <algm-LoadAndMerge>` is a new algorithm that can load and merge multiple runs.
- :ref:`CompressEvents <algm-CompressEvents>` now supports compressing events with pulse time.
- :ref:`MaskBins <algm-MaskBins>` now uses a modernized and standardized way for providing a list of workspace indices. For compatibility reasons the previous ``SpectraList`` property is still supported.
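The :ref:`LoadAndMerge <algm-LoadAndMerge>` entry above follows the same call pattern used by ``PowderDiffILLReduction`` in this change set; a minimal sketch (the run numbers are the D20 test files referenced earlier and serve only as an illustration):

.. code-block:: python

    from mantid.simpleapi import LoadAndMerge

    # Each listed run is loaded with the given loader; runs summed together in the
    # Filename string (with '+') are merged via MergeRuns.
    ws = LoadAndMerge(Filename='967087,967088',
                      LoaderName='LoadILLDiffraction',
                      OutputWorkspace='loaded')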