Commit b396588f authored by Lamar Moore

Re #15542 merge with master

parent 8ecd3c4f
......@@ -90,7 +90,7 @@ void GetEi2::init()
declareProperty(
"FirstMonitorIndex", 0,
"The spectrum index of the first montitor in the input workspace.",
"The workspace index of the first monitor in the input workspace.",
Direction::Output);
declareProperty("Tzero", 0.0, "", Direction::Output);
......@@ -344,7 +344,7 @@ double GetEi2::calculatePeakPosition(size_t ws_index, double t_min,
* @param end :: the number of the last bin to include (starts counting bins at
* 0)
* @return The cropped workspace
* @throw out_of_range if start, end or specInd are set outside of the vaild
* @throw out_of_range if start, end or ws_index are set outside of the valid
* range for the workspace
* @throw runtime_error if the algorithm just falls over
* @throw invalid_argument if the input workspace does not have common binning
......
......@@ -89,7 +89,7 @@ void IntegrateByComponent::exec() {
integratedWS->getInstrument();
PARALLEL_FOR1(integratedWS)
for (int i = 0; i < static_cast<int>(hists.size()); ++i) {
for (int i = 0; i < static_cast<int>(hists.size()); ++i) { // NOLINT
PARALLEL_START_INTERUPT_REGION
const std::set<detid_t> &detids =
......@@ -131,7 +131,7 @@ void IntegrateByComponent::exec() {
}
PARALLEL_FOR1(integratedWS)
for (int i = 0; i < static_cast<int>(hists.size()); ++i) {
for (int i = 0; i < static_cast<int>(hists.size()); ++i) { // NOLINT
PARALLEL_START_INTERUPT_REGION
const std::set<detid_t> &detids =
integratedWS->getSpectrum(hists[i])
......
......@@ -175,7 +175,8 @@ void MaskBins::execEvent() {
if (!this->spectra_list.empty()) {
// Specific spectra were specified
PARALLEL_FOR1(outputWS)
for (int i = 0; i < static_cast<int>(this->spectra_list.size()); ++i) {
for (int i = 0; i < static_cast<int>(this->spectra_list.size()); // NOLINT
++i) {
PARALLEL_START_INTERUPT_REGION
outputWS->getEventList(this->spectra_list[i]).maskTof(m_startX, m_endX);
progress.report();
......
#include "MantidAlgorithms/MatrixWorkspaceAccess.h"
namespace Mantid {
namespace Algorithms {
///@cond Doxygen has problems with decltype for some reason.
/// Returns std::mem_fn object referring to MatrixWorkspace::dataX().
decltype(std::mem_fn(
(std::vector<double> & (API::MatrixWorkspace::*)(const std::size_t)) &
API::MatrixWorkspace::dataX)) MatrixWorkspaceAccess::x =
std::mem_fn(
(std::vector<double> & (API::MatrixWorkspace::*)(const std::size_t)) &
API::MatrixWorkspace::dataX);
///@endcond
}
}
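For readers unfamiliar with the idiom in MatrixWorkspaceAccess.cpp above: dataX() is overloaded, so the C-style cast selects one overload before the member-function pointer is handed to std::mem_fn. A minimal standalone sketch of the same pattern, using a made-up Widget type rather than Mantid's MatrixWorkspace:

#include <cstddef>
#include <functional>
#include <vector>

struct Widget {
  std::vector<double> v;
  std::vector<double> &values(std::size_t) { return v; }
  const std::vector<double> &values(std::size_t) const { return v; }
};

int main() {
  // The cast picks the non-const overload, resolving the ambiguity for mem_fn.
  auto x = std::mem_fn(
      (std::vector<double> & (Widget::*)(std::size_t)) & Widget::values);
  Widget w;
  x(w, 0).push_back(1.0); // equivalent to w.values(0).push_back(1.0)
  return 0;
}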
#include "MantidAlgorithms/MaxEnt.h"
#include "MantidAPI/MatrixWorkspace.h"
#include "MantidAPI/TextAxis.h"
#include "MantidAPI/WorkspaceFactory.h"
#include "MantidAlgorithms/MaxEnt/MaxentEntropyNegativeValues.h"
#include "MantidAlgorithms/MaxEnt/MaxentEntropyPositiveValues.h"
#include "MantidKernel/BoundedValidator.h"
#include "MantidKernel/ListValidator.h"
#include "MantidKernel/UnitFactory.h"
#include <boost/make_shared.hpp>
#include <boost/shared_array.hpp>
#include <gsl/gsl_linalg.h>
......@@ -20,6 +23,27 @@ using namespace Kernel;
// Register the algorithm into the AlgorithmFactory
DECLARE_ALGORITHM(MaxEnt)
namespace {
// Maps defining the inverse caption and label for the reconstructed image
// Example:
// The input workspace (X axis) is in (Time, s)
// The output image should be in (Frequency, Hz)
// Defines the new caption
std::map<std::string, std::string> inverseCaption = {{"Time", "Frequency"},
{"Frequency", "Time"},
{"d-Spacing", "q"},
{"q", "d-Spacing"}};
// Defines the new label
std::map<std::string, std::string> inverseLabel = {{"s", "Hz"},
{"microsecond", "MHz"},
{"Hz", "s"},
{"MHz", "microsecond"},
{"Angstrom", "Angstrom^-1"},
{"Angstrom^-1", "Angstrom"}};
}
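These maps are used further down in populateOutputWS to flip the X-axis caption and label of the reconstructed image (e.g. a Time/s input becomes a Frequency/Hz image). A minimal standalone sketch of that lookup, with a hypothetical helper invertAxis and only a subset of the entries above (the algorithm itself indexes the maps directly):

#include <map>
#include <string>
#include <utility>

// Returns the caption/label for the reconstructed image given the input axis.
// Unknown captions or labels are passed through unchanged in this sketch.
std::pair<std::string, std::string> invertAxis(const std::string &caption,
                                               const std::string &label) {
  static const std::map<std::string, std::string> inverseCaption = {
      {"Time", "Frequency"}, {"Frequency", "Time"}};
  static const std::map<std::string, std::string> inverseLabel = {
      {"s", "Hz"}, {"Hz", "s"}};
  const auto c =
      inverseCaption.count(caption) ? inverseCaption.at(caption) : caption;
  const auto l = inverseLabel.count(label) ? inverseLabel.at(label) : label;
  return {c, l};
}

// Usage: invertAxis("Time", "s") yields {"Frequency", "Hz"}.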
//----------------------------------------------------------------------------------------------
/** Constructor
*/
......@@ -68,6 +92,14 @@ void MaxEnt::init() {
"must be positive. It can take "
"negative values otherwise");
auto mustBePositive = boost::make_shared<BoundedValidator<size_t>>();
mustBePositive->setLower(0);
declareProperty(make_unique<PropertyWithValue<size_t>>(
"DensityFactor", 1, mustBePositive, Direction::Input),
"An integer number indicating the factor by which the number "
"of points will be increased in the image and reconstructed "
"data");
auto mustBeNonNegative = boost::make_shared<BoundedValidator<double>>();
mustBeNonNegative->setLower(1E-12);
declareProperty(make_unique<PropertyWithValue<double>>(
......@@ -91,8 +123,8 @@ void MaxEnt::init() {
"MaxAngle", 0.05, mustBeNonNegative, Direction::Input),
"Maximum degree of non-parallelism between S and C");
auto mustBePositive = boost::make_shared<BoundedValidator<size_t>>();
mustBePositive->setLower(0);
mustBePositive = boost::make_shared<BoundedValidator<size_t>>();
mustBePositive->setLower(1);
declareProperty(make_unique<PropertyWithValue<size_t>>(
"MaxIterations", 20000, mustBePositive, Direction::Input),
"Maximum number of iterations");
......@@ -124,22 +156,48 @@ std::map<std::string, std::string> MaxEnt::validateInputs() {
std::map<std::string, std::string> result;
// X values in input workspace must be equally spaced
MatrixWorkspace_sptr inWS = getProperty("InputWorkspace");
const MantidVec &X = inWS->readX(0);
const double dx = X[1] - X[0];
for (size_t i = 1; i < X.size() - 2; i++) {
if (std::abs(dx - X[i + 1] + X[i]) / dx > 1e-7) {
result["InputWorkspace"] =
"X axis must be linear (all bins must have the same width)";
if (inWS) {
// 1. X values in input workspace must be (almost) equally spaced
const double warningLevel = 0.01;
const double errorLevel = 0.5;
bool printWarning = false;
// Average spacing
const MantidVec &X = inWS->readX(0);
const double dx =
(X[X.size() - 1] - X[0]) / static_cast<double>(X.size() - 1);
for (size_t i = 1; i < X.size() - 1; i++) {
// 1% accuracy exceeded, but data still usable
if (std::abs(X[i] - X[0] - static_cast<double>(i) * dx) / dx >
warningLevel) {
printWarning = true;
if (std::abs(X[i] - X[0] - static_cast<double>(i) * dx) / dx >
errorLevel) {
// 50% accuracy exceeded, data not usable
printWarning = false;
result["InputWorkspace"] =
"X axis must be linear (all bins have same width)";
break;
}
}
}
if (printWarning) {
g_log.warning() << "Bin widths differ by more than " << warningLevel * 100
<< "% of average\n";
}
}
size_t nhistograms = inWS->getNumberHistograms();
bool complex = getProperty("ComplexData");
if (complex && (nhistograms % 2))
result["InputWorkspace"] = "The number of histograms in the input "
"workspace must be even for complex data";
// 2. If the input signal is complex, we expect an even number of histograms
// in the input workspace
size_t nhistograms = inWS->getNumberHistograms();
bool complex = getProperty("ComplexData");
if (complex && (nhistograms % 2))
result["InputWorkspace"] = "The number of histograms in the input "
"workspace must be even for complex data";
}
return result;
}
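The new validation accepts an X axis whose points deviate from the average spacing dx = (X_last - X_first) / (N - 1) by up to 1% of dx with only a warning, and rejects the workspace above 50%. A self-contained sketch of that check, using plain std::vector in place of MantidVec (thresholds and error string copied from the diff above):

#include <cmath>
#include <cstddef>
#include <string>
#include <vector>

// Returns the validation error (empty if the axis is usable) and sets
// printWarning when the 1% tolerance is exceeded but the data is still usable.
std::string checkLinearAxis(const std::vector<double> &X, bool &printWarning) {
  printWarning = false;
  if (X.size() < 3)
    return "";
  const double warningLevel = 0.01; // 1% of the average bin width
  const double errorLevel = 0.5;    // 50% of the average bin width
  const double dx = (X.back() - X.front()) / static_cast<double>(X.size() - 1);
  for (std::size_t i = 1; i < X.size() - 1; i++) {
    const double dev = std::abs(X[i] - X[0] - static_cast<double>(i) * dx) / dx;
    if (dev > warningLevel) {
      printWarning = true;
      if (dev > errorLevel) {
        printWarning = false;
        return "X axis must be linear (all bins have same width)";
      }
    }
  }
  return "";
}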
......@@ -154,6 +212,8 @@ void MaxEnt::exec() {
bool complex = getProperty("ComplexData");
// Image must be positive?
bool positiveImage = getProperty("PositiveImage");
// Increase the number of points in the image by this factor
size_t densityFactor = getProperty("DensityFactor");
// Background (default level, sky background, etc)
double background = getProperty("A");
// Chi target
......@@ -174,7 +234,7 @@ void MaxEnt::exec() {
// Number of spectra
size_t nspec = inWS->getNumberHistograms();
// Number of data points
size_t npoints = inWS->blocksize();
size_t npoints = inWS->blocksize() * densityFactor;
// Number of X bins
size_t npointsX = inWS->isHistogramData() ? npoints + 1 : npoints;
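A small worked example of the DensityFactor handling above, with assumed sizes that are not taken from the diff:

#include <cstddef>

int main() {
  const std::size_t blocksize = 100;   // inWS->blocksize()
  const std::size_t densityFactor = 3; // "DensityFactor" property
  const bool isHistogram = true;       // inWS->isHistogramData()
  const std::size_t npoints = blocksize * densityFactor;            // 300
  const std::size_t npointsX = isHistogram ? npoints + 1 : npoints; // 301
  return npointsX == 301 ? 0 : 1;
}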
......@@ -549,9 +609,11 @@ void MaxEnt::populateOutputWS(const MatrixWorkspace_sptr &inWS, size_t spec,
MantidVec YI(npoints);
MantidVec E(npoints, 0.);
double x0 = inWS->readX(spec)[0];
double dx = inWS->readX(spec)[1] - x0;
if (isImage) {
double dx = inWS->readX(spec)[1] - inWS->readX(spec)[0];
double delta = 1. / dx / npoints;
int isOdd = (inWS->blocksize() % 2) ? 1 : 0;
......@@ -563,16 +625,31 @@ void MaxEnt::populateOutputWS(const MatrixWorkspace_sptr &inWS, size_t spec,
}
if (npointsX == npoints + 1)
X[npoints] = X[npoints - 1] + delta;
// Caption & label
auto inputUnit = inWS->getAxis(0)->unit();
if (inputUnit) {
boost::shared_ptr<Kernel::Units::Label> lblUnit =
boost::dynamic_pointer_cast<Kernel::Units::Label>(
UnitFactory::Instance().create("Label"));
if (lblUnit) {
lblUnit->setLabel(
inverseCaption[inWS->getAxis(0)->unit()->caption()],
inverseLabel[inWS->getAxis(0)->unit()->label().ascii()]);
outWS->getAxis(0)->unit() = lblUnit;
}
}
} else {
for (int i = 0; i < npoints; i++) {
X[i] = inWS->readX(spec)[i];
X[i] = x0 + i * dx;
YR[i] = result[2 * i];
YI[i] = result[2 * i + 1];
}
if (npointsX == npoints + 1)
X[npoints] = inWS->readX(spec)[npoints];
X[npoints] = x0 + npoints * dx;
}
// Reconstructed image
outWS->dataX(spec).assign(X.begin(), X.end());
outWS->dataY(spec).assign(YR.begin(), YR.end());
......
......@@ -31,24 +31,19 @@ void MaxentData::loadReal(const std::vector<double> &data,
// Data and errors must have the same number of points
throw std::runtime_error("Couldn't load invalid data");
}
if (image.size() != 2 * data.size()) {
// If data and errors have N datapoints, image should have 2N datapoints
throw std::runtime_error("Couldn't load invalid data");
if (image.size() % (2 * data.size())) {
// If data and errors have N datapoints, image should have 2*F*N datapoints
// Where F is an integer factor
throw std::runtime_error("Couldn't load invalid image");
}
if (background == 0) {
throw std::runtime_error("Background must be positive");
}
// Set to -1, these will be calculated later
m_angle = -1.;
m_chisq = -1.;
// Load image, calculated data and background
m_image = image;
m_background = background;
correctImage();
m_dataCalc = transformImageToData(image);
size_t size = data.size();
initImageSpace(image, background);
m_data = std::vector<double>(2 * size);
m_errors = std::vector<double>(2 * size);
// Load the experimental (measured data)
......@@ -88,25 +83,19 @@ void MaxentData::loadComplex(const std::vector<double> &dataRe,
// Real and imaginary components must have the same number of datapoints
throw std::runtime_error("Couldn't load invalid data");
}
if (2 * dataRe.size() != image.size()) {
if (image.size() % (2 * dataRe.size())) {
// If real and imaginary parts have N datapoints, image should have 2N
// datapoints
throw std::runtime_error("Couldn't load invalid data");
throw std::runtime_error("Couldn't load invalid image");
}
if (background == 0) {
throw std::runtime_error("Background must be positive");
}
// Set to -1, these will be calculated later
m_angle = -1.;
m_chisq = -1.;
// Set the image, background and calculated data
m_image = image;
m_background = background;
correctImage();
m_dataCalc = transformImageToData(image);
size_t size = dataRe.size();
initImageSpace(image, background);
m_data = std::vector<double>(2 * size);
m_errors = std::vector<double>(2 * size);
// Load the experimental (measured data)
......@@ -120,6 +109,25 @@ void MaxentData::loadComplex(const std::vector<double> &dataRe,
}
}
/**
* Initializes some of the member variables, those which are common to real and
* complex data
* @param image : [input] A starting distribution for the image
* @param background : [input] The background or sky level
*/
void MaxentData::initImageSpace(const std::vector<double> &image,
double background) {
// Set to -1, these will be calculated later
m_angle = -1.;
m_chisq = -1.;
// Load image, calculated data and background
m_image = image;
m_background = background;
correctImage();
m_dataCalc = transformImageToData(image);
}
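The loadReal/loadComplex changes above relax the old requirement image.size() == 2 * data.size(): the image may now hold 2*F*N points for any integer factor F when the data holds N points. A standalone sketch of that check (the empty-data guard is an addition here to avoid a modulo by zero; it is not in the diff):

#include <stdexcept>
#include <vector>

void checkImageSize(const std::vector<double> &data,
                    const std::vector<double> &image) {
  // Image must contain a whole multiple of 2*N points for N data points.
  if (data.empty() || image.size() % (2 * data.size()) != 0)
    throw std::runtime_error("Couldn't load invalid image");
}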
/**
* Corrects the image according to the type of entropy
*/
......@@ -174,17 +182,28 @@ void MaxentData::updateImage(const std::vector<double> &delta) {
*/
std::vector<double> MaxentData::calculateChiGrad() const {
// Calculates the gradient of Chi
// CGrad_i = -2 * [ data_i - dataCalc_i ] / [ error_i ]^2
if ((m_data.size() != m_errors.size()) ||
(m_data.size() != m_dataCalc.size())) {
(m_dataCalc.size() % m_data.size())) {
// Data and errors must have the same number of data points
// but the reconstructed (calculated) data may contain more points
throw std::invalid_argument("Cannot compute gradient of Chi");
}
size_t size = m_data.size();
// Calculate gradient of Chi
// CGrad_i = -2 * [ data_i - dataCalc_i ] / [ error_i ]^2
std::vector<double> cgrad(size, 0.);
for (size_t i = 0; i < size; i++) {
// We only consider the experimental data points to calculate chi grad
size_t sizeDat = m_data.size();
// The number of calculated data points can be bigger than the number of
// experimental data points. I am not sure how we should deal with this
// situation. On the one hand one can only consider real data and errors to
// calculate chi-square, but on the other hand this method should return a
// vector of size equal to the size of the calculated data, so I am just
// setting the 'leftovers' to zero. This is what is done in the original
// muon code.
size_t sizeDatCalc = m_dataCalc.size();
std::vector<double> cgrad(sizeDatCalc, 0.);
for (size_t i = 0; i < sizeDat; i++) {
if (m_errors[i] != 0)
cgrad[i] = -2. * (m_data[i] - m_dataCalc[i]) / m_errors[i] / m_errors[i];
}
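A standalone sketch of the gradient computed above, CGrad_i = -2 * (data_i - dataCalc_i) / error_i^2, where the result is sized to the calculated data and the trailing 'leftover' entries stay at zero (this assumes errors and dataCalc are at least as long as data):

#include <cstddef>
#include <vector>

std::vector<double> chiGrad(const std::vector<double> &data,
                            const std::vector<double> &dataCalc,
                            const std::vector<double> &errors) {
  // Zero-initialised so points beyond the experimental data remain zero.
  std::vector<double> cgrad(dataCalc.size(), 0.);
  for (std::size_t i = 0; i < data.size(); i++)
    if (errors[i] != 0)
      cgrad[i] = -2. * (data[i] - dataCalc[i]) / (errors[i] * errors[i]);
  return cgrad;
}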
......@@ -403,24 +422,33 @@ void MaxentData::calculateQuadraticCoefficients() {
m_coeffs.c1[k][0] /= chiSq;
}
// Then s2, c2
// Then s2
m_coeffs.s2 = Kernel::DblMatrix(dim, dim);
m_coeffs.c2 = Kernel::DblMatrix(dim, dim);
for (size_t k = 0; k < dim; k++) {
for (size_t l = 0; l < k + 1; l++) {
m_coeffs.s2[k][l] = 0.;
for (size_t i = 0; i < npoints; i++) {
m_coeffs.s2[k][l] -=
m_directionsIm[k][i] * m_directionsIm[l][i] / metric[i];
}
m_coeffs.s2[k][l] *= 1.0 / m_background;
}
}
// Then c2
npoints = m_errors.size();
m_coeffs.c2 = Kernel::DblMatrix(dim, dim);
for (size_t k = 0; k < dim; k++) {
for (size_t l = 0; l < k + 1; l++) {
m_coeffs.c2[k][l] = 0.;
for (size_t i = 0; i < npoints; i++) {
if (m_errors[i] != 0)
m_coeffs.c2[k][l] += directionsDat[k][i] * directionsDat[l][i] /
m_errors[i] / m_errors[i];
m_coeffs.s2[k][l] -=
m_directionsIm[k][i] * m_directionsIm[l][i] / metric[i];
}
m_coeffs.c2[k][l] *= 2.0 / chiSq;
m_coeffs.s2[k][l] *= 1.0 / m_background;
}
}
// Symmetrise s2, c2: reflect across the diagonal
for (size_t k = 0; k < dim; k++) {
for (size_t l = k + 1; l < dim; l++) {
......
......@@ -17,7 +17,7 @@ using namespace Geometry;
/// Default constructor
MedianDetectorTest::MedianDetectorTest()
: DetectorDiagnostic(), m_inputWS(), m_loFrac(0.1), m_hiFrac(1.5),
m_minSpec(0), m_maxSpec(EMPTY_INT()), m_rangeLower(0.0),
m_minWsIndex(0), m_maxWsIndex(EMPTY_INT()), m_rangeLower(0.0),
m_rangeUpper(0.0), m_solidAngle(false) {}
const std::string MedianDetectorTest::category() const { return "Diagnostics"; }
......@@ -97,11 +97,12 @@ void MedianDetectorTest::exec() {
// masking and will be used to record any
// required masking from this algorithm
MatrixWorkspace_sptr countsWS = integrateSpectra(
m_inputWS, m_minSpec, m_maxSpec, m_rangeLower, m_rangeUpper, true);
m_inputWS, m_minWsIndex, m_maxWsIndex, m_rangeLower, m_rangeUpper, true);
// 0. Correct for solid angle, if desired
if (m_solidAngle) {
MatrixWorkspace_sptr solidAngle = getSolidAngles(m_minSpec, m_maxSpec);
MatrixWorkspace_sptr solidAngle =
getSolidAngles(m_minWsIndex, m_maxWsIndex);
if (solidAngle != nullptr) {
countsWS = countsWS / solidAngle;
}
......@@ -146,25 +147,25 @@ void MedianDetectorTest::exec() {
*/
void MedianDetectorTest::retrieveProperties() {
m_inputWS = getProperty("InputWorkspace");
int maxSpecIndex = static_cast<int>(m_inputWS->getNumberHistograms()) - 1;
int maxWsIndex = static_cast<int>(m_inputWS->getNumberHistograms()) - 1;
m_parents = getProperty("LevelsUp");
m_minSpec = getProperty("StartWorkspaceIndex");
if ((m_minSpec < 0) || (m_minSpec > maxSpecIndex)) {
m_minWsIndex = getProperty("StartWorkspaceIndex");
if ((m_minWsIndex < 0) || (m_minWsIndex > maxWsIndex)) {
g_log.warning("StartSpectrum out of range, changed to 0");
m_minSpec = 0;
m_minWsIndex = 0;
}
m_maxSpec = getProperty("EndWorkspaceIndex");
if (m_maxSpec == EMPTY_INT())
m_maxSpec = maxSpecIndex;
if ((m_maxSpec < 0) || (m_maxSpec > maxSpecIndex)) {
m_maxWsIndex = getProperty("EndWorkspaceIndex");
if (m_maxWsIndex == EMPTY_INT())
m_maxWsIndex = maxWsIndex;
if ((m_maxWsIndex < 0) || (m_maxWsIndex > maxWsIndex)) {
g_log.warning("EndSpectrum out of range, changed to max spectrum number");
m_maxSpec = maxSpecIndex;
m_maxWsIndex = maxWsIndex;
}
if ((m_maxSpec < m_minSpec)) {
if ((m_maxWsIndex < m_minWsIndex)) {
g_log.warning("EndSpectrum can not be less than the StartSpectrum, changed "
"to max spectrum number");
m_maxSpec = maxSpecIndex;
m_maxWsIndex = maxWsIndex;
}
m_loFrac = getProperty("LowThreshold");
......@@ -251,7 +252,7 @@ int MedianDetectorTest::maskOutliers(
double median = medianvec[i];
PARALLEL_FOR1(countsWS)
for (int j = 0; j < static_cast<int>(hists.size()); ++j) {
for (int j = 0; j < static_cast<int>(hists.size()); ++j) { // NOLINT
const double value = countsWS->readY(hists[j])[0];
if ((value == 0.) && checkForMask) {
const std::set<detid_t> &detids =
......@@ -300,7 +301,7 @@ int MedianDetectorTest::doDetectorTests(
const double minSigma = getProperty("SignificanceTest");
// prepare to report progress
const int numSpec(m_maxSpec - m_minSpec);
const int numSpec(m_maxWsIndex - m_minWsIndex);
const int progStep = static_cast<int>(ceil(numSpec / 30.0));
int steps(0);
......
......@@ -356,9 +356,9 @@ Q1D2::setUpOutputWorkspace(const std::vector<double> &binParams) const {
/** Calculate the normalization term for each output bin
* @param wavStart [in] the index number of the first bin in the input
* wavelengths that is actually being used
* @param specInd [in] the spectrum to calculate
* @param wsIndex [in] the ws index of the spectrum to calculate
* @param pixelAdj [in] if not NULL this workspace contains single bins with
* the adjustments, e.g. detector efficencies, for the given spectrum index
* the adjustments, e.g. detector efficencies, for the given ws index
* @param wavePixelAdj [in] if not NULL this is a workspace that contains the
* adjustments for the pixels and wavelength dependent values.
* @param binNorms [in] pointer to a contiguous array of doubles that are the
......@@ -370,7 +370,7 @@ Q1D2::setUpOutputWorkspace(const std::vector<double> &binParams) const {
* @param normETo2 [out] this pointer must point to the end of the norm array,
* it will be filled with the total of the error on the normalization
*/
void Q1D2::calculateNormalization(const size_t wavStart, const size_t specInd,
void Q1D2::calculateNormalization(const size_t wavStart, const size_t wsIndex,
API::MatrixWorkspace_const_sptr pixelAdj,
API::MatrixWorkspace_const_sptr wavePixelAdj,
double const *const binNorms,
......@@ -378,7 +378,7 @@ void Q1D2::calculateNormalization(const size_t wavStart, const size_t specInd,
const MantidVec::iterator norm,
const MantidVec::iterator normETo2) const {
double detectorAdj, detAdjErr;
pixelWeight(pixelAdj, specInd, detectorAdj, detAdjErr);
pixelWeight(pixelAdj, wsIndex, detectorAdj, detAdjErr);
// use that the normalization array ends at the start of the error array
for (MantidVec::iterator n = norm, e = normETo2; n != normETo2; ++n, ++e) {
*n = detectorAdj;
......@@ -389,32 +389,32 @@ void Q1D2::calculateNormalization(const size_t wavStart, const size_t specInd,
if (wavePixelAdj)
// pass the iterator for the wave pixel Adj dependent
addWaveAdj(binNorms + wavStart, binNormEs + wavStart, norm, normETo2,
wavePixelAdj->readY(specInd).begin() + wavStart,
wavePixelAdj->readE(specInd).begin() + wavStart);
wavePixelAdj->readY(wsIndex).begin() + wavStart,
wavePixelAdj->readE(wsIndex).begin() + wavStart);
else
addWaveAdj(binNorms + wavStart, binNormEs + wavStart, norm, normETo2);
}
normToMask(wavStart, specInd, norm, normETo2);
normToMask(wavStart, wsIndex, norm, normETo2);
}
/** Calculates the normalisation for the spectrum specified by the index number
* that was passed
* as the solid anlge multiplied by the pixelAdj that was passed
* as the solid angle multiplied by the pixelAdj that was passed
* @param[in] pixelAdj if not NULL this workspace contains single bins with
* the adjustments, e.g. detector efficencies, for the given spectrum index
* @param[in] specIndex the spectrum index to return the data from
* @param[out] weight the solid angle or if pixelAdj the solid anlge times the
* the adjustments, e.g. detector efficiencies, for the given ws index
* @param[in] wsIndex the workspace index to return the data from
* @param[out] weight the solid angle or if pixelAdj the solid angle times the
* pixel adjustment for this spectrum
* @param[out] error the error on the weight, only non-zero if pixelAdj
* @throw LogicError if the solid angle is tiny or negative
*/
void Q1D2::pixelWeight(API::MatrixWorkspace_const_sptr pixelAdj,
const size_t specIndex, double &weight,
const size_t wsIndex, double &weight,
double &error) const {
const V3D samplePos = m_dataWS->getInstrument()->getSample()->getPos();
if (m_doSolidAngle)
weight = m_dataWS->getDetector(specIndex)->solidAngle(samplePos);
weight = m_dataWS->getDetector(wsIndex)->solidAngle(samplePos);
else
weight = 1.0;
......@@ -424,8 +424,8 @@ void Q1D2::pixelWeight(API::MatrixWorkspace_const_sptr pixelAdj,
}
// this input multiplies up the adjustment if it exists
if (pixelAdj) {
weight *= pixelAdj->readY(specIndex)[0];
error = weight * pixelAdj->readE(specIndex)[0];
weight *= pixelAdj->readY(wsIndex)[0];
error = weight * pixelAdj->readE(wsIndex)[0];
} else {
error = 0.0;
}
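A standalone sketch of the weighting described in the doc comment above: the weight is the detector's solid angle (or 1.0 when solid-angle correction is off), multiplied by the pixelAdj Y value if one is supplied, and the error is non-zero only in that case. Plain doubles stand in for the workspace lookups:

#include <utility>

// Returns {weight, error} for one spectrum.
std::pair<double, double> pixelWeight(bool doSolidAngle, double solidAngle,
                                      bool hasPixelAdj, double adjY,
                                      double adjE) {
  double weight = doSolidAngle ? solidAngle : 1.0;
  double error = 0.0;
  if (hasPixelAdj) {
    weight *= adjY;
    error = weight * adjE;
  }
  return {weight, error};
}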
......@@ -523,21 +523,21 @@ void Q1D2::addWaveAdj(const double *c, const double *Dc,
}
/** Applies the scaling from bin masking to the normalization
* @param[in] offSet the inex number of the first bin in the input wavelengths
* @param[in] offSet the index number of the first bin in the input wavelengths
* that is actually being used
* @param[in] specIndex the spectrum to calculate
* @param[in] wsIndex the spectrum to calculate
* @param[in,out] theNorms normalization for each bin, this is multiplied by the
* proportion that is not masked and the normalization workspace
* @param[in,out] errorSquared the running total of the square of the
* uncertainty in the normalization
*/
void Q1D2::normToMask(const size_t offSet, const size_t specIndex,
void Q1D2::normToMask(const size_t offSet, const size_t wsIndex,
const MantidVec::iterator theNorms,
const MantidVec::iterator errorSquared) const {