#include "MantidAlgorithms/CalcCountRate.h"
#include "MantidKernel/PropertyWithValue.h"
#include "MantidKernel/BoundedValidator.h"
#include "MantidKernel/TimeSeriesProperty.h"
#include "MantidKernel/ListValidator.h"
#include "MantidKernel/UnitFactory.h"
#include "MantidKernel/MandatoryValidator.h"
#include "MantidAPI/Axis.h"
#include "MantidAPI/NumericAxis.h"
#include "MantidAPI/WorkspaceFactory.h"
#include "MantidDataObjects/Workspace2D.h"
namespace Mantid {
namespace Algorithms {
// Register the algorithm into the AlgorithmFactory
DECLARE_ALGORITHM(CalcCountRate)
//----------------------------------------------------------------------------------------------
/// Algorithms name for identification. @see Algorithm::name
const std::string CalcCountRate::name() const { return "CalcCountRate"; }
/// Algorithm's version for identification. @see Algorithm::version
int CalcCountRate::version() const { return 1; }
/// Algorithm's category for identification. @see Algorithm::category
const std::string CalcCountRate::category() const {
return "Inelastic\\Utility";
}
/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary
const std::string CalcCountRate::summary() const {
return "Calculates instrument count rate as the function of the "
"experiment time and adds CountRate log to the source workspace.";
}
//----------------------------------------------------------------------------------------------
/** Declare the algorithm's properties.
*/
void CalcCountRate::init() {
declareProperty(
Kernel::make_unique<API::WorkspaceProperty<DataObjects::EventWorkspace>>(
"Workspace", "", Kernel::Direction::InOut),
"Name of the event workspace to calculate counting rate for.");
declareProperty(Kernel::make_unique<Kernel::PropertyWithValue<double>>(
"XMin", EMPTY_DBL(), Kernel::Direction::Input),
"Minimal value of X-range for the rate calculations. If left "
"to default, Workspace X-axis minimal value is used.");
declareProperty(Kernel::make_unique<Kernel::PropertyWithValue<double>>(
"XMax", EMPTY_DBL(), Kernel::Direction::Input),
"Maximal value of X-range for the rate calculations. If left "
"to default, Workspace X-axis maximal value is used.");
declareProperty(
Kernel::make_unique<Kernel::PropertyWithValue<std::string>>(
"RangeUnits", "Energy",
boost::make_shared<Kernel::StringListValidator>(
Kernel::UnitFactory::Instance().getKeys()),
Kernel::Direction::Input),
"The units from Mantid Unit factory for calculating the "
"counting rate and XMin-XMax ranges are in. If the "
"X-axis of the input workspace is not expressed"
"in this units, unit conversion will be performed, so the "
"workspace should contain all necessary information for this "
"conversion. E.g. if *RangeUnits* is *EnergyTransfer*, Ei "
"log containing incident energy value should be attached to the "
"input workspace.");
std::vector<std::string> propOptions{"Elastic", "Direct", "Indirect"};
declareProperty("EMode", "Elastic",
boost::make_shared<Kernel::StringListValidator>(propOptions),
"The range units above conversion mode (default: elastic)");
// Used logs group
std::string used_logs_mode("Used normalization logs");
declareProperty(
"NormalizeTheRate", true,
"Usually you want to normalize counting rate to some "
"rate related to the source beam intensity. Change this to "
"'false' if appropriate time series log is broken || not attached to "
"the input workspace.");
declareProperty("UseLogDerivative", false, "If the normalization log gives "
"cumulative counting, derivative "
"of this log is necessary to get "
"correct normalization values.");
declareProperty(
"NormalizationLogName", "proton_charge",
"The name of the log, used in the counting rate normalization. ");
declareProperty(
"UseNormLogGranularity", true,
"If true, the calculated log will have the normalization log "
"accuracy; If false, the 'NumTimeSteps' in the visualization "
"workspace below will be used for the target log granularity too.");
setPropertyGroup("NormalizeTheRate", used_logs_mode);
setPropertyGroup("UseLogDerivative", used_logs_mode);
setPropertyGroup("NormalizationLogName", used_logs_mode);
setPropertyGroup("UseNormLogGranularity", used_logs_mode);
declareProperty(
"CountRateLogName", "block_count_rate",
boost::make_shared<Kernel::MandatoryValidator<std::string>>(),
"The name of the processed time series log with count rate to be added"
" to the source workspace");
// visualisation group
std::string spur_vis_mode("Spurion visualisation");
"VisualizationWsName", "",
"Optional name to build 2D matrix workspace for spurion visualization. "
"If name is provided, a 2D workspace with this name will be created "
"containing data to visualize counting rate as function of time in the "
"ranges "
declareProperty(Kernel::make_unique<API::WorkspaceProperty<>>(
"VisualizationWs", "", Kernel::Direction::Output,
API::PropertyMode::Optional));
auto mustBeReasonable = boost::make_shared<Kernel::BoundedValidator<int>>();
mustBeReasonable->setLower(3);
declareProperty(
Kernel::make_unique<Kernel::PropertyWithValue<int>>(
"NumTimeSteps", 200, mustBeReasonable, Kernel::Direction::Input),
"Number of time steps (time accuracy) the visualization workspace has. "
"Also number of steps in 'CountRateLogName' log if "
"'UseNormLogGranularity' is set to false. Should be bigger than 3");
declareProperty(
Kernel::make_unique<Kernel::PropertyWithValue<int>>(
"XResolution", 100, mustBeReasonable, Kernel::Direction::Input),
"Number of steps (accuracy) of the visualization workspace has along "
"X-axis. ");
setPropertyGroup("VisualizationWsName", spur_vis_mode);
setPropertyGroup("NumTimeSteps", spur_vis_mode);
setPropertyGroup("XResolution", spur_vis_mode);
setPropertyGroup("VisualizationWs", spur_vis_mode);
}
//----------------------------------------------------------------------------------------------
/** Execute the algorithm.
*/
void CalcCountRate::exec() {
DataObjects::EventWorkspace_sptr sourceWS = getProperty("Workspace");
API::EventType et = sourceWS->getEventType();
if (et == API::EventType::WEIGHTED_NOTIME) {
throw std::runtime_error("Event workspace " + sourceWS->name() +
" contains events without necessary frame "
"information. Can not process counting rate");
}
// Identify the correct way to treat input logs and general properties of the
// output log
this->setOutLogParameters(sourceWS);
// identify ranges for count rate calculations and initiate source workspace
this->setSourceWSandXRanges(sourceWS);
// check if visualization workspace is necessary and if it is, prepare
// visualization workspace to use
this->checkAndInitVisWorkspace();
// create results log and add it to the source workspace
std::string logname = getProperty("CountRateLogName");
auto newlog = new Kernel::TimeSeriesProperty<double>(logname);
sourceWS->mutableRun().addProperty(newlog, true);
// calculate averages requested and modify results log
this->calcRateLog(m_workingWS, newlog);
// clear up log derivative and existing log pointer (if any)
// to avoid incorrect usage
// at subsequent calls to the same algorithm.
m_tmpLogHolder.reset();
m_pNormalizationLog = nullptr;
}
/** Process the input workspace to calculate the instrument counting rate as a
* function of experiment time.
*@param InputWorkspace :: shared pointer to the input workspace to process
*@param targLog :: pointer to the time series property containing the count
*                  rate log. The property should exist on input and is filled
*                  with the counting rate log on output.
*/
void CalcCountRate::calcRateLog(
DataObjects::EventWorkspace_sptr &InputWorkspace,
Kernel::TimeSeriesProperty<double> *const targLog) {
MantidVec countRate(m_numLogSteps);
std::vector<double> countNormalization;
if (this->notmalizeCountRate())
countNormalization = m_pNormalizationLog->valuesAsVector();
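// when the visualization workspace is requested, one mutex per visualization
// spectrum lets threads processing different detectors safely update the same
// time slice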
std::unique_ptr<std::mutex[]> pVisWS_locks;
if (this->buildVisWS()) {
pVisWS_locks.reset(new std::mutex[m_visWs->getNumberHistograms()]);
}
int64_t nHist = static_cast<int64_t>(InputWorkspace->getNumberHistograms());
// Initialize progress reporting.
API::Progress prog(this, 0.0, 1.0, nHist);
double dTRangeMin = static_cast<double>(m_TRangeMin.totalNanoseconds());
double dTRangeMax = static_cast<double>(m_TRangeMax.totalNanoseconds());
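// histogramming is performed in pulse time (nanoseconds), over the
// experiment/log time range established by setOutLogParameters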
std::vector<MantidVec> Buff;
int nThreads;
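// each OpenMP thread accumulates counts into its own buffer; the buffers are
// summed into countRate after the event loop, avoiding locks on every event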
#pragma omp parallel
{
#pragma omp single
{
// initialize thread's histogram buffer
nThreads = omp_get_num_threads();
Buff.resize(nThreads);
for (size_t i = 0; i < nThreads; i++) {
Buff[i].assign(m_numLogSteps, 0);
}
}
#pragma omp for
for (int64_t i = 0; i < nHist; ++i) {
auto nThread = omp_get_thread_num();
PARALLEL_START_INTERUPT_REGION
// Get a const event list reference. eventInputWS->dataY() doesn't work.
const DataObjects::EventList &el = InputWorkspace->getSpectrum(i);
el.generateCountsHistogramPulseTime(dTRangeMin, dTRangeMax, Buff[nThread],
m_XRangeMin, m_XRangeMax);
if (this->buildVisWS()) {
this->hisogramEvents(el, pVisWS_locks.get());
}
// Report progress
prog.report(name());
PARALLEL_END_INTERUPT_REGION
}
PARALLEL_CHECK_INTERUPT_REGION
// calculate final sums
#pragma omp for
for (int64_t j = 0; j < m_numLogSteps; j++) {
for (size_t i = 0; i < nThreads; i++) {
countRate[j] += Buff[i][j];
}
// normalize if requested
if (!countNormalization.empty()) {
countRate[j] /= countNormalization[j];
if (this->buildVisWS()) {
this->normalizeVisWs(j);
}
}
}
}
// recover histogram timing: bin centre times are offset from m_TRangeMin (the
// run/log start time)
std::vector<Kernel::DateAndTime> times(m_numLogSteps);
double dt = (dTRangeMax - dTRangeMin) / static_cast<double>(m_numLogSteps);
auto t0 = m_TRangeMin.totalNanoseconds();
for (size_t i = 0; i < m_numLogSteps; i++) {
times[i] = Kernel::DateAndTime(t0 + static_cast<int64_t>((0.5 + i) * dt));
}
// store calculated values within the target log.
targLog->replaceValues(times, countRate);
}
/** Histogram an event list into the visualization workspace
* @param el :: event list to rebin into the visualization workspace
* @param spectraLocks :: pointer to the array of mutexes used to lock modified
* visualization workspace spectra for a thread
*/
void CalcCountRate::hisogramEvents(const DataObjects::EventList &el,
std::mutex *spectraLocks) {
if (el.empty())
return;
const auto &events = el.getEvents();
for (const DataObjects::TofEvent &ev : events) {
double pulsetime = static_cast<double>(ev.pulseTime().totalNanoseconds());
double tof = ev.tof();
if (pulsetime < m_visT0 || pulsetime >= m_visTmax)
continue;
if (tof < m_XRangeMin || tof >= m_XRangeMax)
continue;
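// map the event onto a visualization cell: the pulse time selects the
// spectrum (time slice), the X value selects the bin within it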
size_t n_spec = static_cast<size_t>((pulsetime - m_visT0) / m_visDT);
size_t n_bin = static_cast<size_t>((tof - m_XRangeMin) / m_visDX);
(spectraLocks + n_spec)->lock();
auto &Y = m_visWs->mutableY(n_spec);
Y[n_bin]++;
(spectraLocks + n_spec)->unlock();
}
}
/** Normalize a single spectrum of the visualization workspace using the
* prepared normalization log */
void CalcCountRate::normalizeVisWs(int64_t wsIndex) {
auto &Y = m_visWs->mutableY(wsIndex);
for (auto &yv : Y) {
yv /= m_visNorm[wsIndex];
}
}
/* Analyse the input log parameters and the logs attached to the workspace and
* identify the parameters of the target log, including the experiment time.
*
* @param InputWorkspace -- input workspace whose logs are analysed
*/
void CalcCountRate::setOutLogParameters(
const DataObjects::EventWorkspace_sptr &InputWorkspace) {
std::string NormLogName = getProperty("NormalizationLogName");
m_normalizeResult = getProperty("NormalizeTheRate");
bool useLogDerivative = getProperty("UseLogDerivative");
bool useLogAccuracy = getProperty("UseNormLogGranularity");
bool logPresent = InputWorkspace->run().hasProperty(NormLogName);
if (!logPresent) {
if (m_normalizeResult) {
g_log.warning()
<< "Normalization by log: '" << NormLogName
<< "' values requested but the log is not attached to the "
"workspace. Normalization disabled\n";
m_normalizeResult = false;
}
if (useLogDerivative) {
g_log.warning() << "Normalization by log: '" << NormLogName
<< "' -- log derivative requested but the source log is "
"not attached to "
"the workspace. Log derivative will not be used.\n";
useLogDerivative = false;
}
if (useLogAccuracy) {
g_log.warning() << "Using accuracy of the log: '" << NormLogName
<< "' is requested but the log is not attached to the "
"'NumTimeSteps' property value\n";
useLogAccuracy = false;
}
} else {
m_pNormalizationLog =
InputWorkspace->run().getTimeSeriesProperty<double>(NormLogName);
// Analyse properties interactions
// if property derivative is specified.
if (useLogDerivative) {
m_tmpLogHolder = m_pNormalizationLog->getDerivative();
m_pNormalizationLog = m_tmpLogHolder.get();
m_useLogDerivative = true;
if (m_normalizeResult) {
if (!useLogAccuracy) {
g_log.warning() << "Change of the counting log accuracy while "
"normalizing by log values is not implemented. Will "
"use log accuracy.";
useLogAccuracy = true;
}
} //---------------------------------------------------------------------
}
}
// find target log ranges and identify what normalization should be used
Kernel::DateAndTime runTMin, runTMax;
InputWorkspace->getPulseTimeMinMax(runTMin, runTMax);
//
if (useLogAccuracy) { // extract log times located inside the run time
Kernel::DateAndTime tLogMin, tLogMax;
if (m_useLogDerivative) { // derivative moves events to the bin center,
// but we need initial range
auto pSource =
InputWorkspace->run().getTimeSeriesProperty<double>(NormLogName);
tLogMin = pSource->firstTime();
tLogMax = pSource->lastTime();
} else {
tLogMin = m_pNormalizationLog->firstTime();
tLogMax = m_pNormalizationLog->lastTime();
}
//
if (tLogMin < runTMin || tLogMax > runTMax) {
if (tLogMin > runTMax ||
tLogMax < runTMin) { // log time is outside of the experiment time.
// Log normalization is impossible
g_log.warning()
<< "Normalization log " << m_pNormalizationLog->name()
<< " time lies outside of the the whole experiment time. "
"Log normalization impossible.\n";
m_pNormalizationLog = nullptr;
m_normalizeResult = false;
useLogAccuracy = false;
} else {
if (!m_tmpLogHolder) {
m_tmpLogHolder = std::make_unique<Kernel::TimeSeriesProperty<double>>(
*m_pNormalizationLog->clone());
}
m_tmpLogHolder->filterByTime(runTMin, runTMax);
m_pNormalizationLog = m_tmpLogHolder.get();
m_numLogSteps = m_pNormalizationLog->realSize();
}
} else {
if (tLogMin > runTMin || tLogMax < runTMax) {
g_log.warning() << "Normalization log " << m_pNormalizationLog->name()
<< " time does not cover the whole experiment time. "
"Log normalization impossible.\n";
m_pNormalizationLog = nullptr;
m_normalizeResult = false;
useLogAccuracy = false;
}
}
}
if (!useLogAccuracy) {
m_TRangeMin = runTMin;
// histogramming excludes rightmost events. Modify max limit to keep them
double t_epsilon = double(runTMax.totalNanoseconds() *
(1 + std::numeric_limits<double>::epsilon()));
int64_t eps_increment =
static_cast<int64_t>(t_epsilon - double(runTMax.totalNanoseconds()));
m_TRangeMax =
runTMax + eps_increment; // *(1+std::numeric_limits<double>::epsilon());
// when the normalization log granularity is not used, the number of steps in
// the target log is defined by the NumTimeSteps property
m_numLogSteps = getProperty("NumTimeSteps");
}
}
/* Retrieve and define data search ranges from the input workspace parameters
* and the algorithm properties.
*
* @param InputWorkspace -- event workspace to process. Also retrieves the
*                          algorithm properties relevant to the workspace.
*
* Sets m_workingWS -- the input workspace converted according to the XMin-XMax
*                     ranges in the units requested by the user.
*/
void CalcCountRate::setSourceWSandXRanges(
DataObjects::EventWorkspace_sptr &InputWorkspace) {
std::string RangeUnits = getProperty("RangeUnits");
// compare the X-axis units of the input workspace with the requested units
auto axis = InputWorkspace->getAxis(0);
const auto unit = axis->unit();
API::MatrixWorkspace_sptr wst;
if (unit->unitID() != RangeUnits) {
auto conv = createChildAlgorithm("ConvertUnits", 0, 1);
std::string wsName = InputWorkspace->name();
if (wsName.empty()) {
wsName = "_CountRate_UnitsConverted";
} else {
wsName = "_" + wsName + "_converted";
}
conv->setProperty("InputWorkspace", InputWorkspace);
conv->setPropertyValue("OutputWorkspace", wsName);
std::string Emode = getProperty("Emode");
conv->setProperty("Emode", Emode);
conv->execute();
wst = conv->getProperty("OutputWorkspace");
} else {
wst = InputWorkspace;
}
m_workingWS = boost::dynamic_pointer_cast<DataObjects::EventWorkspace>(wst);
if (!m_workingWS) {
throw std::runtime_error("SetWSDataRanges:Can not retrieve EventWorkspace "
"after converting units");
}
// data ranges
m_XRangeMin = getProperty("XMin");
m_XRangeMax = getProperty("XMax");
if (m_XRangeMin == EMPTY_DBL() && m_XRangeMax == EMPTY_DBL()) {
m_rangeExplicit = false;
} else {
m_rangeExplicit = true;
}
double realMin, realMax;
m_workingWS->getEventXMinMax(realMin, realMax);
if (!m_rangeExplicit) { // The range is the whole workspace range
m_XRangeMin = realMin;
// include rightmost limit into the histogramming
m_XRangeMax = realMax * (1. + std::numeric_limits<double>::epsilon());
return;
}
if (m_XRangeMin == EMPTY_DBL()) {
m_XRangeMin = realMin;
}
if (m_XRangeMax == EMPTY_DBL()) {
// include rightmost limit into the histogramming
m_XRangeMax = realMax * (1. + std::numeric_limits<double>::epsilon());
}
if (m_XRangeMin < realMin) {
g_log.debug() << "Workspace constrain min range changed from: "
<< m_XRangeMin << " To: " << realMin << std::endl;
m_XRangeMin = realMin;
}
if (m_XRangeMax > realMax) {
g_log.debug() << "Workspace constrain max range changed from: "
<< m_XRangeMax << " To: " << realMax << std::endl;
m_XRangeMax = realMax * (1. + std::numeric_limits<double>::epsilon());
}
}
/**Check if visualization workspace is necessary and initiate it if requested.
* Sets or clears up internal m_visWS pointer and "do-visualization workspace"
* option.
*/
void CalcCountRate::checkAndInitVisWorkspace() {
std::string visWSName = getProperty("VisualizationWsName");
if (visWSName.empty()) {
m_visWs.reset();
m_doVis = false;
return;
}
m_doVis = true;
int numTBins = getProperty("NumTimeSteps");
int numXBins = getProperty("XResolution");
std::string RangeUnits = getProperty("RangeUnits");
m_visWs = boost::dynamic_pointer_cast<DataObjects::Workspace2D>(
API::WorkspaceFactory::Instance().create("Workspace2D", numTBins,
numXBins + 1, numXBins));
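// the visualization workspace has one spectrum per time step, with numXBins
// bins along the X-axis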
m_visWs->setTitle(visWSName);
double Xmax = m_XRangeMax;
// a bit dodgy code. It can be generalized
if (std::isinf(Xmax)) {
auto Xbin = m_workingWS->readX(0);
for (int64_t i = Xbin.size() - 1; i >= 0; --i) {
if (!std::isinf(Xbin[i])) {
Xmax = Xbin[i];
break;
}
}
if (std::isinf(Xmax)) {
g_log.warning() << "All X-range for visualization workspace is infinity. "
"Can not build visualization workspace in the units "
"requested\n";
m_visWs.reset();
m_doVis = false;
return;
}
}
// define X-axis in target units
double dX = (Xmax - m_XRangeMin) / numXBins;
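// X-axis points are placed at the bin centres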
std::vector<double> xx(numXBins);
for (size_t i = 0; i < numXBins; ++i) {
xx[i] = m_XRangeMin + (0.5 + static_cast<double>(i)) * dX;
}
auto ax0 = new API::NumericAxis(xx);
ax0->setUnit(RangeUnits);
m_visWs->replaceAxis(0, ax0);
// define Y axis (in seconds);
double dt =
((m_TRangeMax.totalNanoseconds() - m_TRangeMin.totalNanoseconds()) /
(numTBins)) *
1.e-9;
xx.resize(numTBins);
for (size_t i = 0; i < numTBins; i++) {
xx[i] = (0.5 + static_cast<double>(i)) * dt;
}
auto ax1 = new API::NumericAxis(xx);
auto labelY = boost::dynamic_pointer_cast<Kernel::Units::Label>(
Kernel::UnitFactory::Instance().create("Label"));
labelY->setLabel("sec");
ax1->unit() = labelY;
m_visWs->replaceAxis(1, ax1);
setProperty("VisualizationWs", m_visWs);
// define binning parameters used while calculating visualization
m_visX0 = m_XRangeMin;
m_visDX = dX;
m_visT0 = static_cast<double>(m_TRangeMin.totalNanoseconds());
m_visTmax = static_cast<double>(m_TRangeMax.totalNanoseconds());
m_visDT = (m_visTmax - m_visT0) / static_cast<double>(numTBins);
if (this->notmalizeCountRate()) {
m_visNorm.resize(numTBins);
this->buildVisWSNormalization(m_visNorm);
}
}
/** Helper function to check if visualization workspace should be used*/
bool CalcCountRate::buildVisWS() const { return m_doVis; }
/** Helper function, mainly for testing
* @return true if count rate should be normalized and false
* otherwise */
bool CalcCountRate::notmalizeCountRate() const { return m_normalizeResult; }
/** Helper function, mainly for testing
* @return true if log derivative is used instead of log itself */
bool CalcCountRate::useLogDerivative() const { return m_useLogDerivative; }
/** Method to prepare the normalization vector for the visualization workspace
* using data from the normalization log which, usually, has a different number
* of time steps */
void CalcCountRate::buildVisWSNormalization(
std::vector<double> &normalization) {
if (!m_pNormalizationLog) {
m_normalizeResult = false;
g_log.warning() << "CalcCountRate::buildVisWSNormalization: No source "
"normalization log is found. Will not normalize "
"visualization workspace\n";
return;
}
// visualization workspace should be present and initialized at this stage:
auto ax = dynamic_cast<API::NumericAxis *>(m_visWs->getAxis(1));
if (!ax)
throw std::runtime_error(
"Can not retrieve Y-axis from visualization workspace");
normalization.resize(ax->length());
auto log_time = m_pNormalizationLog->timesAsVectorSeconds();
auto log_val = m_pNormalizationLog->valuesAsVector();
// the log step has to be (and usually is) constant (TODO: add checks?).
// Logging has some bug in converting to time
double dtSource = log_time[2] - log_time[1];
// calculate source distribution
std::vector<double> source(log_time.size());
size_t last = log_time.size() - 1;
// log values are right shifted values
for (size_t i = 1; i < last; ++i) {
source[i] = log_val[i] / dtSource;
}
source[0] = 0.5 * log_val[0] / dtSource;
source[last] = 0.5 * log_val[last] / dtSource;
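// the first and last log points carry half weight, as they cover only half a
// log step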
auto t_norm = ax->getValues();
double dt = t_norm[1] -
t_norm[0]; // equal step, axis points in the middle of the binning
int logSize = m_pNormalizationLog->realSize();
if (dt <= dtSource) {
// linearly interpolate to fine grid
for (size_t i = 0; i < normalization.size(); ++i) {
double tn = t_norm[0] + i * dt;
if (tn < log_time[0] || tn >= log_time[last]) {
normalization[i] = 0; // should not ever happen at this stage
continue;
}
size_t ind = static_cast<size_t>((tn - log_time[0]) / dtSource);
normalization[i] = dt * (source[ind] +
(tn - log_time[ind]) *
(source[ind + 1] - source[ind]) / dtSource);
}
} else {
// Integrate to the coarse grid.
// std::vector<double> log_edges;
// VectorHelper::convertToBinBoundary(log_time, log_edges);
auto t_bins = ax->createBinBoundaries();
dt = t_bins[1] - t_bins[0]; // equal bins
for (size_t i = 0; i < normalization.size(); i++) {
double t0 = t_bins[i];
double t1 = t_bins[i + 1];
if (t0 < log_time[0]) {
if (t1 <= log_time[0]) {
normalization[i] = 0; // should not ever happen at this stage
continue;
} else {
t0 = log_time[0];
}
}
if (t1 > log_time[last]) {
if (t0 >= log_time[last]) {
normalization[i] = 0; // should not ever happen at this stage
continue;
} else {
t1 = log_time[last] * (1 - std::numeric_limits<double>::epsilon());
}
}
size_t i0 = static_cast<size_t>((t0 - log_time[0]) / dtSource);
size_t i1 = static_cast<size_t>((t1 - log_time[0]) / dtSource);
normalization[i] =
dtSource *
(std::accumulate(source.begin() + i0 + 1, source.begin() + i1, 0.) +
0.5 * (source[i0] +
(t0 - log_time[i0]) * (source[i0 + 1] - source[i0]) /
dtSource) +
0.5 * (source[i1] +
(t1 - log_time[i1]) * (source[i1 + 1] - source[i1]) /
dtSource));
}
}
}
} // namespace Algorithms
} // namespace Mantid