Unverified Commit a8840401 authored by Coleman Kendrick's avatar Coleman Kendrick Committed by GitHub
Browse files

Merge pull request #31961 from mantidproject/scd356_four_circle_integration

Additional integration methods for HB3AIntegrateDetectorPeaks ornl-next
parents c0248721 114b5b78
......@@ -277,7 +277,7 @@ class HB3AAdjustSampleNorm(PythonAlgorithm):
else:
norm_data = CloneMDWorkspace(data)
if self.getProperty("ScaleByMotorStep").value:
if self.getProperty("ScaleByMotorStep").value and self.getProperty("OutputType").value != "Detector":
run = data.getExperimentInfo(0).run()
scan_log = 'omega' if np.isclose(run.getTimeAveragedStd('phi'), 0.0) else 'phi'
scan_axis = run[scan_log].value
......
......@@ -7,8 +7,9 @@
from mantid.api import (AlgorithmFactory, IPeaksWorkspaceProperty,
PythonAlgorithm, PropertyMode, ADSValidator,
WorkspaceGroup)
from mantid.kernel import (Direction, IntArrayProperty, Property,
IntArrayLengthValidator, StringArrayProperty)
from mantid.kernel import (Direction, EnabledWhenProperty, IntArrayProperty, IntBoundedValidator,
Property, PropertyCriterion, IntArrayLengthValidator, StringArrayProperty,
StringListValidator)
from mantid.simpleapi import (mtd, IntegrateMDHistoWorkspace,
CreatePeaksWorkspace, DeleteWorkspace,
AnalysisDataService, SetGoniometer,
......@@ -38,6 +39,22 @@ class HB3AIntegrateDetectorPeaks(PythonAlgorithm):
self.declareProperty(StringArrayProperty("InputWorkspace", direction=Direction.Input, validator=ADSValidator()),
doc="Workspace or comma-separated workspace list containing input MDHisto scan data.")
self.declareProperty("Method", direction=Direction.Input, defaultValue="Fitted",
validator=StringListValidator(["Counts", "CountsWithFitting", "Fitted"]),
doc="Integration method to use")
self.declareProperty("NumBackgroundPts", direction=Direction.Input, defaultValue=3,
validator=IntBoundedValidator(lower=0),
doc="Number of background points from beginning and end of scan to use for background estimation")
self.setPropertySettings("NumBackgroundPts", EnabledWhenProperty("Method", PropertyCriterion.IsEqualTo, "Counts"))
self.declareProperty("WidthScale", direction=Direction.Input, defaultValue=2,
validator=IntBoundedValidator(lower=0, exclusive=True),
doc="Controls integration range (+/- WidthScale/2*FWHM) defined around motor positions "
"for CountsWithFitting method")
self.setPropertySettings("WidthScale",
EnabledWhenProperty("Method", PropertyCriterion.IsEqualTo, "CountsWithFitting"))
self.declareProperty(IntArrayProperty("LowerLeft", [128, 128], IntArrayLengthValidator(2),
direction=Direction.Input), doc="Region of interest lower-left corner, in detector pixels")
self.declareProperty(IntArrayProperty("UpperRight", [384, 384], IntArrayLengthValidator(2),
......@@ -55,7 +72,8 @@ class HB3AIntegrateDetectorPeaks(PythonAlgorithm):
self.declareProperty("ApplyLorentz", True, doc="If to apply Lorentz Correction to intensity")
self.declareProperty("OutputFitResults", False, doc="This will output the fitting result workspace and a ROI workspace")
self.setPropertySettings("OutputFitResults",
EnabledWhenProperty("Method", PropertyCriterion.IsNotEqualTo, "Counts"))
self.declareProperty("OptimizeQVector", True,
doc="This will convert the data to q and optimize the peak location using CentroidPeaksdMD")
......@@ -70,6 +88,9 @@ class HB3AIntegrateDetectorPeaks(PythonAlgorithm):
NumberOfPeaks=0,
OutputWorkspace=outWS, EnableLogging=False)
method = self.getProperty("Method").value
n_bkgr_pts = self.getProperty("NumBackgroundPts").value
n_fwhm = self.getProperty("WidthScale").value
scale = self.getProperty("ScaleFactor").value
chisqmax = self.getProperty("ChiSqMax").value
signalNoiseMin = self.getProperty("SignalNoiseMin").value
......@@ -81,7 +102,7 @@ class HB3AIntegrateDetectorPeaks(PythonAlgorithm):
optmize_q = self.getProperty("OptimizeQVector").value
output_fit = self.getProperty("OutputFitResults").value
if output_fit:
if output_fit and method != "Counts":
fit_results = WorkspaceGroup()
AnalysisDataService.addOrReplace(outWS+"_fit_results", fit_results)
......@@ -98,98 +119,150 @@ class HB3AIntegrateDetectorPeaks(PythonAlgorithm):
run = mtd[inWS].getExperimentInfo(0).run()
scan_log = 'omega' if np.isclose(run.getTimeAveragedStd('phi'), 0.0) else 'phi'
scan_axis = run[scan_log].value
scan_step = (scan_axis[-1] - scan_axis[0]) / (scan_axis.size - 1)
data.setX(0, scan_axis)
y = data.extractY().flatten()
x = data.extractX().flatten()
function = f"name=FlatBackground, A0={np.nanmin(y)};" \
f"name=Gaussian, PeakCentre={x[np.nanargmax(y)]}, Height={np.nanmax(y)-np.nanmin(y)}, Sigma=0.25"
constraints = f"f0.A0 > 0, f1.Height > 0, {x.min()} < f1.PeakCentre < {x.max()}"
try:
fit_result = Fit(function, data, Output=str(data),
IgnoreInvalidData=True,
OutputParametersOnly=not output_fit,
Constraints=constraints,
StartX=startX, EndX=endX,
EnableLogging=False)
except RuntimeError as e:
self.log().warning("Failed to fit workspace {}: {}".format(inWS, e))
continue
if fit_result.OutputStatus == 'success' and fit_result.OutputChi2overDoF < chisqmax:
__tmp_pw = CreatePeaksWorkspace(OutputType='LeanElasticPeak',
InstrumentWorkspace=inWS,
NumberOfPeaks=0,
EnableLogging=False)
_, A, x, s, _ = fit_result.OutputParameters.toDict()['Value']
_, errA, _, errs, _ = fit_result.OutputParameters.toDict()['Error']
if scan_log == 'omega':
SetGoniometer(Workspace=__tmp_pw, Axis0=f'{x},0,1,0,-1', Axis1='chi,0,0,1,-1', Axis2='phi,0,1,0,-1',
EnableLogging=False)
else:
SetGoniometer(Workspace=__tmp_pw, Axis0='omega,0,1,0,-1', Axis1='chi,0,0,1,-1', Axis2=f'{x},0,1,0,-1',
EnableLogging=False)
peak = __tmp_pw.createPeakHKL([run['h'].getStatistics().median,
run['k'].getStatistics().median,
run['l'].getStatistics().median])
peak.setWavelength(float(run['wavelength'].value))
integrated_intensity = A * s * np.sqrt(2*np.pi) * scale
peak.setIntensity(integrated_intensity)
# Convert correlation back into covariance
cor_As = (fit_result.OutputNormalisedCovarianceMatrix.cell(1,4)/100
* fit_result.OutputParameters.cell(1,2) * fit_result.OutputParameters.cell(3,2))
# σ^2 = 2π (A^2 σ_s^2 + σ_A^2 s^2 + 2 A s σ_As)
integrated_intensity_error = np.sqrt(2*np.pi * (A**2 * errs**2 + s**2 * errA**2 + 2*A*s*cor_As)) * scale
peak.setSigmaIntensity(integrated_intensity_error)
if integrated_intensity/integrated_intensity_error > signalNoiseMin:
__tmp_pw.addPeak(peak)
# correct q-vector using CentroidPeaksMD
if optmize_q:
__tmp_q_ws = HB3AAdjustSampleNorm(InputWorkspaces=inWS, NormaliseBy='None', EnableLogging=False)
__tmp_pw = CentroidPeaksMD(__tmp_q_ws, __tmp_pw, EnableLogging=False)
DeleteWorkspace(__tmp_q_ws, EnableLogging=False)
if use_lorentz:
# ILL Neutron Data Booklet, Second Edition, Section 2.9, Part 4.1, Equation 7
peak = __tmp_pw.getPeak(0)
lorentz = abs(np.sin(peak.getScattering() * np.cos(peak.getAzimuthal())))
peak.setIntensity(peak.getIntensity() * lorentz)
peak.setSigmaIntensity(peak.getSigmaIntensity() * lorentz)
CombinePeaksWorkspaces(outWS, __tmp_pw, OutputWorkspace=outWS, EnableLogging=False)
DeleteWorkspace(__tmp_pw, EnableLogging=False)
if output_fit:
fit_results.addWorkspace(RenameWorkspace(tmp_inWS+'_Workspace', outWS+"_"+inWS+'_Workspace', EnableLogging=False))
fit_results.addWorkspace(RenameWorkspace(tmp_inWS+'_Parameters', outWS+"_"+inWS+'_Parameters', EnableLogging=False))
fit_results.addWorkspace(RenameWorkspace(tmp_inWS+'_NormalisedCovarianceMatrix',
outWS+"_"+inWS+'_NormalisedCovarianceMatrix', EnableLogging=False))
fit_results.addWorkspace(IntegrateMDHistoWorkspace(InputWorkspace=inWS,
P1Bin=f'{ll[1]},0,{ur[1]}',
P2Bin=f'{ll[0]},0,{ur[0]}',
P3Bin='0,{}'.format(mtd[inWS].getDimension(2).getNBins()),
OutputWorkspace=outWS+"_"+inWS+"_ROI", EnableLogging=False))
__tmp_pw = CreatePeaksWorkspace(OutputType='LeanElasticPeak',
InstrumentWorkspace=inWS,
NumberOfPeaks=0,
EnableLogging=False)
if method != "Counts":
# fit against gaussian with flat background for both the Fitted and CountsWithFitting methods
fit_result = self._fit_gaussian(inWS, data, x, y, startX, endX, output_fit)
if fit_result and fit_result.OutputStatus == 'success' and fit_result.OutputChi2overDoF < chisqmax:
B, A, peak_centre, sigma, _ = fit_result.OutputParameters.toDict()['Value']
_, errA, _, errs, _ = fit_result.OutputParameters.toDict()['Error']
if method == "Fitted":
integrated_intensity = A * sigma * np.sqrt(2 * np.pi)
# Convert correlation back into covariance
cor_As = (fit_result.OutputNormalisedCovarianceMatrix.cell(1, 4) / 100
* fit_result.OutputParameters.cell(1, 2) * fit_result.OutputParameters.cell(3, 2))
# σ^2 = 2π (A^2 σ_s^2 + σ_A^2 s^2 + 2 A s σ_As)
integrated_intensity_error = np.sqrt(
2 * np.pi * (A ** 2 * errs ** 2 + sigma ** 2 * errA ** 2 + 2 * A * sigma * cor_As))
elif method == "CountsWithFitting":
y = y[slice(np.searchsorted(x, peak_centre - 2.3548 * sigma * n_fwhm / 2),
np.searchsorted(x, peak_centre + 2.3548 * sigma * n_fwhm / 2))]
# subtract out the fitted flat background
integrated_intensity = (y.sum() - B * y.size) * scan_step
integrated_intensity_error = np.sum(np.sqrt(y)) * scan_step
# update the goniometer position based on the fitted peak center
if scan_log == 'omega':
SetGoniometer(Workspace=__tmp_pw, Axis0=f'{peak_centre},0,1,0,-1', Axis1='chi,0,0,1,-1',
Axis2='phi,0,1,0,-1',
EnableLogging=False)
else:
SetGoniometer(Workspace=__tmp_pw, Axis0='omega,0,1,0,-1', Axis1='chi,0,0,1,-1',
Axis2=f'{peak_centre},0,1,0,-1',
EnableLogging=False)
else:
self.log().warning("Skipping peak from {} because Signal/Noise={:.3f} which is less than {}"
.format(inWS, integrated_intensity/integrated_intensity_error, signalNoiseMin))
self.log().warning("Failed to fit workspace {}: Output Status={}, ChiSq={}".format(inWS,
fit_result.OutputStatus,
fit_result.OutputChi2overDoF))
self._delete_tmp_workspaces(str(__tmp_pw), tmp_inWS)
continue
else:
self.log().warning("Failed to fit workspace {}: Output Status={}, ChiSq={}".format(inWS,
fit_result.OutputStatus,
fit_result.OutputChi2overDoF))
integrated_intensity, integrated_intensity_error = self._counts_integration(data, n_bkgr_pts, scan_step)
# set the goniometer position to use the average of the scan
SetGoniometer(Workspace=__tmp_pw, Axis0='omega,0,1,0,-1', Axis1='chi,0,0,1,-1',
Axis2='phi,0,1,0,-1',
EnableLogging=False)
integrated_intensity *= scale
integrated_intensity_error *= scale
peak = __tmp_pw.createPeakHKL([run['h'].getStatistics().median,
run['k'].getStatistics().median,
run['l'].getStatistics().median])
peak.setWavelength(float(run['wavelength'].value))
peak.setIntensity(integrated_intensity)
peak.setSigmaIntensity(integrated_intensity_error)
if integrated_intensity / integrated_intensity_error > signalNoiseMin:
__tmp_pw.addPeak(peak)
# correct q-vector using CentroidPeaksMD
if optmize_q:
__tmp_q_ws = HB3AAdjustSampleNorm(InputWorkspaces=inWS, NormaliseBy='None', EnableLogging=False)
__tmp_pw = CentroidPeaksMD(__tmp_q_ws, __tmp_pw, EnableLogging=False)
DeleteWorkspace(__tmp_q_ws, EnableLogging=False)
if use_lorentz:
# ILL Neutron Data Booklet, Second Edition, Section 2.9, Part 4.1, Equation 7
peak = __tmp_pw.getPeak(0)
lorentz = abs(np.sin(peak.getScattering() * np.cos(peak.getAzimuthal())))
peak.setIntensity(peak.getIntensity() * lorentz)
peak.setSigmaIntensity(peak.getSigmaIntensity() * lorentz)
CombinePeaksWorkspaces(outWS, __tmp_pw, OutputWorkspace=outWS, EnableLogging=False)
if output_fit and method != "Counts":
fit_results.addWorkspace(RenameWorkspace(tmp_inWS + '_Workspace', outWS + "_" + inWS + '_Workspace',
EnableLogging=False))
fit_results.addWorkspace(
RenameWorkspace(tmp_inWS + '_Parameters', outWS + "_" + inWS + '_Parameters',
EnableLogging=False))
fit_results.addWorkspace(RenameWorkspace(tmp_inWS + '_NormalisedCovarianceMatrix',
outWS + "_" + inWS + '_NormalisedCovarianceMatrix',
EnableLogging=False))
fit_results.addWorkspace(IntegrateMDHistoWorkspace(InputWorkspace=inWS,
P1Bin=f'{ll[1]},0,{ur[1]}',
P2Bin=f'{ll[0]},0,{ur[0]}',
P3Bin='0,{}'.format(
mtd[inWS].getDimension(2).getNBins()),
OutputWorkspace=outWS + "_" + inWS + "_ROI",
EnableLogging=False))
else:
self.log().warning("Skipping peak from {} because Signal/Noise={:.3f} which is less than {}"
.format(inWS, integrated_intensity/integrated_intensity_error, signalNoiseMin))
for tmp_ws in (tmp_inWS, tmp_inWS+'_Workspace', tmp_inWS+'_Parameters', tmp_inWS+'_NormalisedCovarianceMatrix'):
if mtd.doesExist(tmp_ws):
DeleteWorkspace(tmp_ws, EnableLogging=False)
self._delete_tmp_workspaces(str(__tmp_pw), tmp_inWS)
self.setProperty("OutputWorkspace", mtd[outWS])
def _delete_tmp_workspaces(self, tmp_peakws, tmp_inWS):
    """Remove the temporary peaks workspace and any fit-output workspaces derived from tmp_inWS, if they exist."""
    suffixes = ('_Workspace', '_Parameters', '_NormalisedCovarianceMatrix')
    candidates = [tmp_peakws, tmp_inWS] + [tmp_inWS + suffix for suffix in suffixes]
    for name in candidates:
        # a workspace may be absent (e.g. fit skipped), so check before deleting
        if mtd.doesExist(name):
            DeleteWorkspace(name, EnableLogging=False)
def _counts_integration(self, ws, n_background_pts, scan_step):
    """simple cuboid integration of detector counts with estimated background subtracted"""
    counts = ws.extractY().flatten()
    if n_background_pts != 0:
        # average the first and last n points of the scan as the background level
        head = counts[:n_background_pts].sum()
        tail = counts[-n_background_pts:].sum()
        avg_background = (head + tail) / (2 * n_background_pts)
    else:
        avg_background = 0.0
    integrated_intensity = (counts.sum() - avg_background * counts.size) * scan_step
    # Poisson error per point, summed over the scan
    integrated_intensity_error = np.sqrt(counts).sum() * scan_step
    return integrated_intensity, integrated_intensity_error
def _fit_gaussian(self, inWS, ws, x, y, startX, endX, output_fit):
    """Fit ws to a Gaussian plus a flat background.

    Initial guesses are taken from the data: the background starts at the
    minimum of y, the peak centre at the x of the maximum of y, and the
    height at the peak-to-background difference. The peak centre is
    constrained to lie within the x range of the data.

    Returns the Fit result object on success, or None when Fit raised a
    RuntimeError (a warning is logged); callers test the result's
    truthiness before using it.
    """
    function = f"name=FlatBackground, A0={np.nanmin(y)};" \
               f"name=Gaussian, PeakCentre={x[np.nanargmax(y)]}, Height={np.nanmax(y) - np.nanmin(y)}, Sigma=0.25"
    constraints = f"f0.A0 > 0, f1.Height > 0, {x.min()} < f1.PeakCentre < {x.max()}"
    # None is the idiomatic falsy sentinel for "fit failed" (was "")
    fit_result = None
    try:
        fit_result = Fit(function, ws, Output=str(ws),
                         IgnoreInvalidData=True,
                         OutputParametersOnly=not output_fit,
                         Constraints=constraints,
                         StartX=startX, EndX=endX,
                         EnableLogging=False)
    except RuntimeError as e:
        self.log().warning("Failed to fit workspace {}: {}".format(inWS, e))
    return fit_result
def _expand_groups(self):
"""expand workspace groups"""
workspaces = self.getProperty("InputWorkspace").value
......
......@@ -6,19 +6,26 @@
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
import numpy as np
from mantid.simpleapi import HB3AIntegrateDetectorPeaks, HB3AAdjustSampleNorm, DeleteWorkspace
from mantid.simpleapi import HB3AIntegrateDetectorPeaks, HB3AAdjustSampleNorm, DeleteWorkspace, mtd
class HB3ADetectorPeaksTest(unittest.TestCase):
def testIntegratePeaks(self):
data = HB3AAdjustSampleNorm("HB3A_data.nxs", OutputType="Detector", NormaliseBy="None")
@classmethod
def setUpClass(cls):
    # Load the shared detector-space test data once for all tests in this
    # class; it is published to the ADS under the name "data".
    HB3AAdjustSampleNorm("HB3A_data.nxs", OutputType="Detector", NormaliseBy="None", OutputWorkspace="data")
@classmethod
def tearDownClass(cls):
    # Remove the shared workspace created in setUpClass.
    DeleteWorkspace("data")
def testIntegratePeaksFitted(self):
data = mtd["data"]
# ChiSq is larger than maximum
peaks = HB3AIntegrateDetectorPeaks(data, ChiSqMax=10, ApplyLorentz=False, OptimizeQVector=False)
self.assertEqual(peaks.getNumberPeaks(), 0)
# Singal/Noise ratio is lower than requestsd
# Signal/Noise ratio is lower than requested
peaks = HB3AIntegrateDetectorPeaks(data, ChiSqMax=100, SignalNoiseMin=100,
ApplyLorentz=False, OptimizeQVector=False)
self.assertEqual(peaks.getNumberPeaks(), 0)
......@@ -32,7 +39,7 @@ class HB3ADetectorPeaksTest(unittest.TestCase):
self.assertAlmostEqual(peak0.getK(), 0, places=1)
self.assertAlmostEqual(peak0.getL(), 6, places=1)
self.assertAlmostEqual(peak0.getIntensity(), 961.6164, delta=1e-5)
self.assertAlmostEqual(peak0.getSigmaIntensity(), 10.479567, delta=1e-5)
self.assertAlmostEqual(peak0.getSigmaIntensity(), 10.479567, delta=3e-2)
self.assertAlmostEqual(peak0.getWavelength(), 1.008)
self.assertAlmostEqual(peak0.getAzimuthal(), -np.pi, delta=2e-5)
self.assertAlmostEqual(peak0.getScattering(),
......@@ -75,7 +82,7 @@ class HB3ADetectorPeaksTest(unittest.TestCase):
delta=1e-2)
self.assertAlmostEqual(peak0.getSigmaIntensity(),
10.479567 * np.sin(np.deg2rad(data.getExperimentInfo(0).run()['2theta'].value[0])),
delta=1e-2)
delta=2e-2)
# Optimize Q vector, will change Q-sample but the integration
# should be the same except the Lorentz correction since the
......@@ -86,7 +93,7 @@ class HB3ADetectorPeaksTest(unittest.TestCase):
peak0 = peaks.getPeak(0)
lorentz = abs(np.cos(peak0.getAzimuthal()) * np.sin(peak0.getScattering()))
self.assertAlmostEqual(peak0.getIntensity(), 961.6164021216964 * lorentz, delta=1e-2)
self.assertAlmostEqual(peak0.getSigmaIntensity(), 10.479567046232615 * lorentz, delta=1e-2)
self.assertAlmostEqual(peak0.getSigmaIntensity(), 10.479567046232615 * lorentz, delta=2e-2)
q_sample = peak0.getQSampleFrame()
for i in range(3):
self.assertNotAlmostEqual(q_sample[i], expected_q_sample[i])
......@@ -102,7 +109,105 @@ class HB3ADetectorPeaksTest(unittest.TestCase):
self.assertAlmostEqual(peak0.getIntensity(), 960.977625, delta=1e-5)
self.assertAlmostEqual(peak0.getSigmaIntensity(), 10.621905, delta=1e-5)
DeleteWorkspace(data)
DeleteWorkspace(peaks)
def testIntegratePeaksCounts(self):
    """Integration with Method=Counts: plain summation, then with Lorentz and Q-optimization."""
    data = mtd["data"]
    two_theta = np.deg2rad(data.getExperimentInfo(0).run()['2theta'].value[0])

    # plain counts integration, no Lorentz correction
    peaks = HB3AIntegrateDetectorPeaks(data, Method="Counts", ApplyLorentz=False, OptimizeQVector=False)
    self.assertEqual(peaks.getNumberPeaks(), 1)
    peak0 = peaks.getPeak(0)
    self.assertAlmostEqual(peak0.getH(), 0, places=1)
    self.assertAlmostEqual(peak0.getK(), 0, places=1)
    self.assertAlmostEqual(peak0.getL(), 6, places=1)
    self.assertAlmostEqual(peak0.getIntensity(), 932.24967, delta=1e-5)
    self.assertAlmostEqual(peak0.getSigmaIntensity(), 29.10343, delta=3e-2)
    self.assertAlmostEqual(peak0.getWavelength(), 1.008)
    self.assertAlmostEqual(peak0.getAzimuthal(), -np.pi, delta=2e-5)
    self.assertAlmostEqual(peak0.getScattering(), two_theta, delta=1e-5)
    q_sample = peak0.getQSampleFrame()
    expected_q_sample = data.getExperimentInfo(0).sample().getOrientedLattice().qFromHKL(peak0.getHKL())
    for axis in range(3):
        self.assertAlmostEqual(q_sample[axis], expected_q_sample[axis])

    # Lorentz correction should scale the intensity by `sin(2theta)`
    peaks = HB3AIntegrateDetectorPeaks(data, Method="Counts", ApplyLorentz=True, OptimizeQVector=False)
    self.assertEqual(peaks.getNumberPeaks(), 1)
    peak0 = peaks.getPeak(0)
    self.assertAlmostEqual(peak0.getScattering(), two_theta, delta=1e-5)
    self.assertAlmostEqual(peak0.getIntensity(), 932.24967 * np.sin(two_theta), delta=1e-2)
    self.assertAlmostEqual(peak0.getSigmaIntensity(), 29.10343 * np.sin(two_theta), delta=2e-2)

    # optimizing the Q vector changes Q-sample but not the (Lorentz-corrected) integration
    peaks = HB3AIntegrateDetectorPeaks(data, Method="Counts", ApplyLorentz=True, OptimizeQVector=True)
    self.assertEqual(peaks.getNumberPeaks(), 1)
    peak0 = peaks.getPeak(0)
    lorentz = abs(np.cos(peak0.getAzimuthal()) * np.sin(peak0.getScattering()))
    self.assertAlmostEqual(peak0.getIntensity(), 932.24967 * lorentz, delta=1e-2)
    self.assertAlmostEqual(peak0.getSigmaIntensity(), 29.10343 * lorentz, delta=2e-2)
    q_sample = peak0.getQSampleFrame()
    for axis in range(3):
        self.assertNotAlmostEqual(q_sample[axis], expected_q_sample[axis])

    DeleteWorkspace(peaks)
def testIntegratePeaksCountsWithFitting(self):
    """Integration with Method=CountsWithFitting: fitted-window summation, then with Lorentz and Q-optimization."""
    data = mtd["data"]
    two_theta = np.deg2rad(data.getExperimentInfo(0).run()['2theta'].value[0])

    # fitted-window counts integration, no Lorentz correction
    peaks = HB3AIntegrateDetectorPeaks(data, Method="CountsWithFitting", ApplyLorentz=False, OptimizeQVector=False,
                                       ChiSqMax=100)
    self.assertEqual(peaks.getNumberPeaks(), 1)
    peak0 = peaks.getPeak(0)
    self.assertAlmostEqual(peak0.getH(), 0, places=1)
    self.assertAlmostEqual(peak0.getK(), 0, places=1)
    self.assertAlmostEqual(peak0.getL(), 6, places=1)
    self.assertAlmostEqual(peak0.getIntensity(), 969.778546, delta=1e-5)
    self.assertAlmostEqual(peak0.getSigmaIntensity(), 24.776354, delta=3e-2)
    self.assertAlmostEqual(peak0.getWavelength(), 1.008)
    self.assertAlmostEqual(peak0.getAzimuthal(), -np.pi, delta=2e-5)
    self.assertAlmostEqual(peak0.getScattering(), two_theta, delta=1e-5)
    q_sample = peak0.getQSampleFrame()
    expected_q_sample = data.getExperimentInfo(0).sample().getOrientedLattice().qFromHKL(peak0.getHKL())
    for axis in range(3):
        self.assertAlmostEqual(q_sample[axis], expected_q_sample[axis])

    # Lorentz correction should scale the intensity by `sin(2theta)`
    peaks = HB3AIntegrateDetectorPeaks(data, Method="CountsWithFitting", ApplyLorentz=True, OptimizeQVector=False,
                                       ChiSqMax=100)
    self.assertEqual(peaks.getNumberPeaks(), 1)
    peak0 = peaks.getPeak(0)
    self.assertAlmostEqual(peak0.getScattering(), two_theta, delta=1e-5)
    self.assertAlmostEqual(peak0.getIntensity(), 969.778546 * np.sin(two_theta), delta=1e-2)
    self.assertAlmostEqual(peak0.getSigmaIntensity(), 24.776354 * np.sin(two_theta), delta=2e-2)

    # optimizing the Q vector changes Q-sample but not the (Lorentz-corrected) integration
    peaks = HB3AIntegrateDetectorPeaks(data, Method="CountsWithFitting", ApplyLorentz=True, OptimizeQVector=True,
                                       ChiSqMax=100)
    self.assertEqual(peaks.getNumberPeaks(), 1)
    peak0 = peaks.getPeak(0)
    lorentz = abs(np.cos(peak0.getAzimuthal()) * np.sin(peak0.getScattering()))
    self.assertAlmostEqual(peak0.getIntensity(), 969.778546 * lorentz, delta=1e-2)
    self.assertAlmostEqual(peak0.getSigmaIntensity(), 24.776354 * lorentz, delta=2e-2)
    q_sample = peak0.getQSampleFrame()
    for axis in range(3):
        self.assertNotAlmostEqual(q_sample[axis], expected_q_sample[axis])

    DeleteWorkspace(peaks)
......
......@@ -14,15 +14,13 @@ reduction workflow, using :ref:`HB3AAdjustSampleNorm
<algm-HB3AAdjustSampleNorm>` with `OutputType=Detector` which can be
seen below in the example usage.
This will reduced the input workspace using the region of interest
This will reduce the input workspace using the region of interest
provided to create a 1-dimension workspace with an axis that is the
detector scan, either omega or chi in degrees. This reduced workspace
is then fitted using :ref:`Fit <algm-Fit>` with a :ref:`flat
background <func-FlatBackground>` and a :ref:`Gaussian
<func-Gaussian>`, then the area of the Gaussian is used as the peak
intensity and added to the output workspace. An optionally fitting
range of the scan axis can be provided with `StartX` and `EndX` in
degrees.
detector scan, either omega or chi in degrees.
An optional fitting range of the scan axis can be provided with
`StartX` and `EndX` in degrees, which is applicable for the
`CountsWithFitting` and `Fitted` methods.
The `OptimizeQVector` option will convert the input data into Q and
use :ref:`CentroidPeaksMD <algm-CentroidPeaksMD>` to find the
......@@ -30,6 +28,78 @@ correct Q-vector starting from the known HKL and UB matrix of the
peaks. This will not affect the integration of the peak but allows the
UB matrix to be refined afterwards.
Integration Methods
###################
There are three different methods for integrating the input workspace:
simple counts summation, simple counts summation with fitted background,
and a fitted model.
Counts
++++++
This method uses the simple cuboid integration to approximate the
integrated peak intensity.
.. math:: I = \sum_i (C_i - B) \times \Delta X
where
* :math:`C_i` is the normalized detector counts in ROI of measurement *i*
* :math:`\Delta X` is the motor step
* :math:`B` is the estimated background level per measurement
The error is calculated as
.. math:: \sigma = \sum_i \sqrt{C_i} \cdot \Delta X
The background is estimated by averaging the counts of the first and last `NumBackgroundPts` measurements of the scan:
.. math:: B = \frac{\sum_{i \in <pt>} C_i}{|<pt>|}
where :math:`<pt>` is the set of measurements used for the background estimation,
i.e. the first and last `NumBackgroundPts` measurements of the scan.
CountsWithFitting
+++++++++++++++++
For `Method=CountsWithFitting`, the input is fit to a :ref:`Gaussian
<func-Gaussian>` with a :ref:`flat background <func-FlatBackground>`,
just like `Method=Fitted`. However, the peak intensity is instead
approximated by summing the detector counts over a specific set of
measurements that are defined by the motor positions in the range of
:math:`\pm \frac{N}{2} \text{FWHM}`, where `N` is controlled with the
`WidthScale` option. The background is removed over the same
range using the fitted flat background.
.. math:: I = \sum_i^{<pt>} (C_i - B) \times \Delta X
where
* :math:`C_i` is the normalized detector counts in ROI of measurement *i*
* :math:`\Delta X` is the motor step
* :math:`B` is the fitted flat background
* the set of measurements *<pt>* is defined by the motor positions in the range of :math:`x_0 \pm \frac{N}{2}FWHM`.
- usually the default value of *N* is set to 2.
- :math:`FWHM = 2\sqrt{2\ln2}s \approx 2.3548s`
The error is calculated as
.. math:: \sigma = \sum_i \sqrt{C_i} \cdot \Delta X
F