diff --git a/Framework/PythonInterface/plugins/algorithms/HB2AReduce.py b/Framework/PythonInterface/plugins/algorithms/HB2AReduce.py
new file mode 100644
index 0000000000000000000000000000000000000000..21cfbd78d65ef963bfb9d294188109583dbf75cb
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/HB2AReduce.py
@@ -0,0 +1,277 @@
+from __future__ import (absolute_import, division, print_function)
+from mantid.api import (PythonAlgorithm, AlgorithmFactory, PropertyMode, WorkspaceProperty, FileProperty,
+                        FileAction, MultipleFileProperty)
+from mantid.kernel import (Direction, IntArrayProperty, FloatTimeSeriesProperty, FloatBoundedValidator,
+                           EnabledWhenProperty, PropertyCriterion, Property)
+from mantid import logger
+import numpy as np
+import datetime
+import os
+import re
+import warnings
+
+
class HB2AReduce(PythonAlgorithm):
    """Reduce HFIR POWDER (HB-2A) scan files to a powder diffraction workspace."""

    # Fixed angular offsets (degrees) of the 44 anodes relative to the
    # 2theta motor position; added to data['2theta'] in PyExec to get the
    # true scattering angle of each detector.
    _gaps = np.array([0.   ,    2.641,    5.287,    8.042,   10.775,   13.488,
                      16.129,   18.814,   21.551,   24.236,   26.988,   29.616,
                      32.312,   34.956,   37.749,   40.4  ,   43.111,   45.839,
                      48.542,   51.207,   53.938,   56.62 ,   59.286,   61.994,
                      64.651,   67.352,   70.11 ,   72.765,   75.492,   78.204,
                      80.917,   83.563,   86.279,   88.929,   91.657,   94.326,
                      97.074,   99.784,  102.494,  105.174,  107.813,  110.551,
                      113.25 ,  115.915])
+
    def category(self):
        """Return the category under which the algorithm appears in Mantid."""
        return 'Diffraction\\Reduction'
+
+    def seeAlso(self):
+        return [ "" ]
+
    def name(self):
        """Return the algorithm name used for registration and invocation."""
        return 'HB2AReduce'
+
    def summary(self):
        """Return the one-line summary shown in the algorithm dialog."""
        return 'Performs data reduction for HB-2A POWDER at HFIR'
+
+    def PyInit(self):
+        self.declareProperty(MultipleFileProperty(name="Filename", action=FileAction.OptionalLoad,
+                                                  extensions=[".dat"]), "Data files to load")
+        self.declareProperty('IPTS', Property.EMPTY_INT, "IPTS number to load from")
+        self.declareProperty('Exp', Property.EMPTY_INT, "Experiment number to load from")
+        self.declareProperty(IntArrayProperty("ScanNumbers", []), 'Scan numbers to load')
+        self.declareProperty(FileProperty(name="Vanadium", defaultValue="", action=FileAction.OptionalLoad, extensions=[".dat", ".txt"]),
+                             doc="Vanadium file, can be either the vanadium scan file or the reduced vcorr file. "
+                             "If not provided the vcorr file adjacent to the data file will be used")
+        self.declareProperty('Normalise', True, "If False vanadium normalisation will not be performed")
+        self.declareProperty(IntArrayProperty("ExcludeDetectors", []),
+                             doc="Detectors to exclude. If not provided the HB2A_exp???__exclude_detectors.txt adjacent "
+                             "to the data file will be used if it exist")
+        self.declareProperty('IndividualDetectors', False,
+                             "If True the workspace will include each anode as a separate spectrum, useful for debugging issues")
+        condition = EnabledWhenProperty("IndividualDetectors", PropertyCriterion.IsDefault)
+        self.declareProperty('BinData', True, "Data will be binned using BinWidth. If False then all data will be unbinned")
+        self.setPropertySettings("BinData", condition)
+        positiveFloat = FloatBoundedValidator(lower=0., exclusive=True)
+        self.declareProperty('BinWidth', 0.05, positiveFloat, "Bin size of the output workspace")
+        self.setPropertySettings("BinWidth", condition)
+        self.declareProperty('Scale', 1.0, positiveFloat, "The output will be scaled by this value")
+        self.declareProperty(WorkspaceProperty("OutputWorkspace", "",
+                                               optional=PropertyMode.Mandatory,
+                                               direction=Direction.Output),
+                             "Output Workspace")
+
+    def validateInputs(self):
+        issues = dict()
+
+        if not self.getProperty("Filename").value:
+            if ((self.getProperty("IPTS").value == Property.EMPTY_INT) or
+                (self.getProperty("Exp").value == Property.EMPTY_INT) or
+               len(self.getProperty("ScanNumbers").value) is 0):
+                issues["Filename"] = 'Must specify either Filename or IPTS AND Exp AND ScanNumbers'
+
+        return issues
+
    def PyExec(self):
        """Load the scan file(s), normalise to vanadium and create the output workspace."""
        scale = self.getProperty("Scale").value
        filenames = self.getProperty("Filename").value

        if not filenames:
            # Build filenames from IPTS, Exp and ScanNumbers
            # (validateInputs guarantees all three are set in this case)
            ipts = self.getProperty("IPTS").value
            exp = self.getProperty("Exp").value
            filenames = ['/HFIR/HB2A/IPTS-{0}/exp{1}/Datafiles/HB2A_exp{1:04}_scan{2:04}.dat'.format(ipts, exp, scan)
                         for scan in self.getProperty("ScanNumbers").value]

        metadata = None
        data = None

        # Read in data array and append all files
        for filename in filenames:
            # Read in all lines once
            with open(filename) as f:
                lines = f.readlines()

            if metadata is None:
                # Read in metadata from the first file only, as
                # "#key = value" header lines
                metadata = dict([np.char.strip(re.split('#(.*?)=(.*)', line, flags=re.U)[1:3])
                                 for line in lines if re.match('^#.*=', line)])
                # Get indir and exp from first file; these locate the
                # adjacent vcorr and exclude-detectors files
                indir, data_filename = os.path.split(filename)
                _, exp, _ = data_filename.replace(".dat", "").split('_')

            # Find size of header, the size changes between files:
            # first non-'#' line minus one is the column-name line
            header = np.argmax([bool(re.match('(?!^#)', line)) for line in lines])-1
            if header < 0:
                raise RuntimeError("{} has no data in it".format(filename))
            names = lines[header].split()[1:]

            try:
                d = np.loadtxt(lines[header:], ndmin=1, dtype={'names': names, 'formats':[float]*len(names)})
            except (ValueError, IndexError):
                raise RuntimeError("Could not read {}, file likely malformed".format(filename))

            # Accumulate data from all scans into one structured array
            data = d if data is None else np.append(data, d)

        # Get any masked detectors (boolean mask over the 44 anodes)
        detector_mask = self.get_detector_mask(exp, indir)

        counts = np.array([data['anode{}'.format(n)] for n in range(1,45)])[detector_mask]
        twotheta = data['2theta']
        monitor = data['monitor']

        # Get either vcorr file or vanadium data
        vanadium_count, vanadium_monitor, vcorr = self.get_vanadium(detector_mask,
                                                                    data['m1'][0], data['colltrans'][0],
                                                                    exp, indir)

        # True scattering angle of each anode: motor 2theta plus the fixed
        # per-anode offset from _gaps
        x = twotheta+self._gaps[:, np.newaxis][detector_mask]

        if self.getProperty("IndividualDetectors").value:
            # Separate spectrum per anode
            y, e = self.process(counts, scale, monitor, vanadium_count, vanadium_monitor, vcorr)
            NSpec=len(x)
        else:
            if self.getProperty("BinData").value:
                # Data binned with BinWidth onto a regular grid
                x, y, e = self.process_binned(counts, x.ravel(), scale, monitor, vanadium_count, vanadium_monitor, vcorr)
            else:
                y, e = self.process(counts, scale, monitor, vanadium_count, vanadium_monitor, vcorr)
            NSpec=1

        createWS_alg = self.createChildAlgorithm("CreateWorkspace", enableLogging=False)
        createWS_alg.setProperty("DataX", x)
        createWS_alg.setProperty("DataY", y)
        createWS_alg.setProperty("DataE", e)
        createWS_alg.setProperty("NSpec", NSpec)
        createWS_alg.setProperty("UnitX", "Degrees")
        createWS_alg.setProperty("YUnitLabel", "Counts")
        createWS_alg.setProperty("WorkspaceTitle", str(metadata['scan_title']))
        createWS_alg.execute()
        outWS = createWS_alg.getProperty("OutputWorkspace").value

        self.setProperty("OutputWorkspace", outWS)

        self.add_metadata(outWS, metadata, data)
+
+    def get_detector_mask(self, exp, indir):
+        """Returns an anode mask"""
+        detector_mask = np.ones(44, dtype=bool)
+        if len(self.getProperty("ExcludeDetectors").value) == 0:
+            exclude_filename = os.path.join(indir, 'HB2A_{}__exclude_detectors.txt'.format(exp))
+            if os.path.isfile(exclude_filename):
+                with warnings.catch_warnings():
+                    warnings.simplefilter("ignore")
+                    exclude_detectors = np.loadtxt(exclude_filename, ndmin=1, dtype=int)
+            else:
+                exclude_detectors=np.empty(0, dtype=int)
+        else:
+            exclude_detectors = np.array(self.getProperty("ExcludeDetectors").value)
+        if len(exclude_detectors) > 0:
+            logger.notice("Excluding anodes: {}".format(exclude_detectors))
+        detector_mask[exclude_detectors-1] = False
+        return detector_mask
+
+    def get_vanadium(self, detector_mask, m1, colltrans, exp, indir):
+        """
+        This function returns either (vanadium_count, vanadium_monitor, None) or
+        (None, None, vcorr) depending what type of file is provided by getProperty("Vanadium")
+        """
+        if not self.getProperty("Normalise").value:
+            return None, None, np.ones(44)[detector_mask]
+
+        vanadium_filename = self.getProperty("Vanadium").value
+        if vanadium_filename:
+            if vanadium_filename.split('.')[-1] == 'dat':
+                vanadium = np.genfromtxt(vanadium_filename)
+                vanadium_count = vanadium[:, 5:49].sum(axis=0)[detector_mask]
+                vanadium_monitor = vanadium[:, 3].sum()
+                logger.notice("Using vanadium data file: {}".format(vanadium_filename))
+                return vanadium_count, vanadium_monitor, None
+            else:
+                vcorr_filename = vanadium_filename
+        else: # Find adjacent vcorr file
+            # m1 is the monochromator angle
+            # m1 = 0 -> Ge 115, 1.54A
+            # m1 = 9.45 -> Ge 113, 2.41A
+            # colltrans is the collimator position, whether in or out of the beam
+            # colltrans = 0 -> IN
+            # colltrans = +/-80 -> OUT
+            vcorr_filename = 'HB2A_{}__Ge_{}_{}_vcorr.txt'.format(exp,
+                                                                  115 if np.isclose(m1, 0, atol=0.1) else 113,
+                                                                  "IN" if np.isclose(colltrans, 0, atol=0.1) else "OUT")
+        vcorr_filename = os.path.join(indir, vcorr_filename)
+        logger.notice("Using vcorr file: {}".format(vcorr_filename))
+        if not os.path.isfile(vcorr_filename):
+            raise RuntimeError("Vanadium file {} does not exist".format(vcorr_filename))
+
+        return None, None, np.genfromtxt(vcorr_filename)[detector_mask]
+
+    def process(self, counts, scale, monitor, vanadium_count=None, vanadium_monitor=None, vcorr=None):
+        """Reduce data not binning"""
+        if vcorr is not None:
+            y = counts/vcorr[:, np.newaxis]/monitor
+            e = np.sqrt(counts)/vcorr[:, np.newaxis]/monitor
+        else:
+            y = counts/vanadium_count[:, np.newaxis]*vanadium_monitor/monitor
+            e = np.sqrt(1/counts + 1/vanadium_count[:, np.newaxis] + 1/vanadium_monitor + 1/monitor)*y
+        return np.nan_to_num(y*scale), np.nan_to_num(e*scale)
+
+    def process_binned(self, counts, x, scale, monitor, vanadium_count=None, vanadium_monitor=None, vcorr=None):
+        """Bin the data"""
+        binWidth = self.getProperty("BinWidth").value
+        bins = np.arange(x.min(), x.max()+binWidth, binWidth) # calculate bin boundaries
+        inds = np.digitize(x, bins) # get bin indices
+
+        # because np.broadcast_to is not in numpy 1.7.1 we use stride_tricks
+        if vcorr is not None:
+            vcorr=np.lib.stride_tricks.as_strided(vcorr, shape=counts.shape, strides=(vcorr.strides[0],0))
+            vcorr_binned = np.bincount(inds, weights=vcorr.ravel(), minlength=len(bins))
+        else:
+            vanadium_count=np.lib.stride_tricks.as_strided(vanadium_count, shape=counts.shape, strides=(vanadium_count.strides[0],0))
+            vanadium_binned = np.bincount(inds, weights=vanadium_count.ravel(), minlength=len(bins))
+            vanadium_monitor_binned = np.bincount(inds, minlength=len(bins))*vanadium_monitor
+
+        monitor=np.lib.stride_tricks.as_strided(monitor, shape=counts.shape, strides=(monitor.strides[0],0))
+
+        counts_binned = np.bincount(inds, weights=counts.ravel(), minlength=len(bins))
+        monitor_binned = np.bincount(inds, weights=monitor.ravel(), minlength=len(bins))
+        number_binned = np.bincount(inds, minlength=len(bins))
+
+        old_settings = np.seterr(all='ignore') # otherwise it will complain about divide by zero
+        if vcorr is not None:
+            y = (counts_binned/vcorr_binned*number_binned/monitor_binned)[1:]
+            e = (np.sqrt(1/counts_binned)[1:])*y
+        else:
+            y = (counts_binned/vanadium_binned*vanadium_monitor_binned/monitor_binned)[1:]
+            e = (np.sqrt(1/counts_binned + 1/vanadium_binned + 1/vanadium_monitor + 1/monitor_binned)[1:])*y
+        np.seterr(**old_settings)
+        x = bins
+        return x, np.nan_to_num(y*scale), np.nan_to_num(e*scale)
+
    def add_metadata(self, ws, metadata, data):
        """Adds metadata to the workspace.

        :param ws: output workspace whose run receives the logs
        :param metadata: dict of header key/value pairs from the data file
        :param data: structured array of the scan columns
        """
        run = ws.getRun()

        # Just copy all metadata in the file
        for key in metadata.keys():
            run.addProperty(key, str(metadata[key]), True)

        # Add correct start and end time
        start_time = np.datetime64(datetime.datetime.strptime(metadata['time']+' '+metadata['date'], '%I:%M:%S %p %m/%d/%Y'))
        run.addProperty('start_time', str(start_time), True)

        # Create time array for time series logs
        # NOTE(review): cumsum with dtype=np.int64 truncates fractional
        # seconds of the per-point count times -- confirm this is intended
        time_array = start_time + np.cumsum(data['time'], dtype=np.int64)*np.timedelta64(1,'s')
        run.addProperty('end_time', str(time_array[-1]), True)
        run.addProperty('duration', float((time_array[-1]-time_array[0])/np.timedelta64(1, 's')), True)

        # Create time series logs for the scan variables (anode columns are
        # the data themselves, not logs, so they are skipped)
        for name in data.dtype.names:
            if 'anode' not in name:
                log = FloatTimeSeriesProperty(name)
                for t, v in zip(time_array, data[name]):
                    log.addValue(t, v)
                run[name]=log
+
+
+AlgorithmFactory.subscribe(HB2AReduce)
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt b/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
index 3fd46bdba4257e0d4a76394149f69bc1db6efcde..8f670db5e570301dabe116add62c30b0f00a2213 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
@@ -49,6 +49,7 @@ set ( TEST_PY_FILES
   FractionalIndexingTest.py
   GetEiT0atSNSTest.py
   GetNegMuMuonicXRDTest.py
+  HB2AReduceTest.py
   IndirectTransmissionTest.py
   IndexSatellitePeaksTest.py
   LoadAndMergeTest.py
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/HB2AReduceTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/HB2AReduceTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0e26313c502fb37c6a088a8a0bdb7cd63d6e9c4
--- /dev/null
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/HB2AReduceTest.py
@@ -0,0 +1,68 @@
+from __future__ import absolute_import, division, print_function
+from mantid.simpleapi import HB2AReduce
+import unittest
+import numpy as np
+
+
class HB2AReduceTest(unittest.TestCase):
    """Tests for HB2AReduce against the exp666/exp644 reference data files.

    Uses assertEqual/assertAlmostEqual; the assertEquals/assertAlmostEquals
    spellings are deprecated aliases removed in Python 3.12.
    """

    def test_IndividualDetectors(self):
        HB2AReduce_ws = HB2AReduce('HB2A_exp0666_scan0024.dat',
                                   IndividualDetectors=True)
        self.assertTrue(HB2AReduce_ws)
        self.assertEqual(HB2AReduce_ws.getNumberHistograms(), 44)
        self.assertEqual(HB2AReduce_ws.blocksize(), 121)
        self.assertEqual(np.argmax(HB2AReduce_ws.extractY()), 4887)
        self.assertAlmostEqual(np.max(HB2AReduce_ws.extractY()), 2.789331777)
        HB2AReduce_ws.delete()

    def test_NotBinned(self):
        HB2AReduce_ws = HB2AReduce('HB2A_exp0666_scan0024.dat', BinData=False)
        self.assertTrue(HB2AReduce_ws)
        self.assertEqual(HB2AReduce_ws.getNumberHistograms(), 1)
        self.assertEqual(HB2AReduce_ws.blocksize(), 5324)
        self.assertEqual(np.argmax(HB2AReduce_ws.extractY()), 4887)
        self.assertAlmostEqual(np.max(HB2AReduce_ws.extractY()), 2.789331777)
        HB2AReduce_ws.delete()

    def test_Binned(self):
        HB2AReduce_ws = HB2AReduce('HB2A_exp0666_scan0024.dat')
        self.assertTrue(HB2AReduce_ws)
        self.assertEqual(HB2AReduce_ws.getNumberHistograms(), 1)
        self.assertEqual(HB2AReduce_ws.blocksize(), 2439)
        self.assertEqual(np.argmax(HB2AReduce_ws.extractY()), 2203)
        self.assertAlmostEqual(np.max(HB2AReduce_ws.extractY()), 2.7863608266)
        HB2AReduce_ws.delete()

    def test_TwoFiles(self):
        HB2AReduce_ws = HB2AReduce('HB2A_exp0666_scan0024.dat,HB2A_exp0666_scan0025.dat')
        self.assertTrue(HB2AReduce_ws)
        self.assertEqual(HB2AReduce_ws.getNumberHistograms(), 1)
        self.assertEqual(HB2AReduce_ws.blocksize(), 2439)
        self.assertEqual(np.argmax(HB2AReduce_ws.extractY()), 2203)
        self.assertAlmostEqual(np.max(HB2AReduce_ws.extractY()), 2.8059953301)
        HB2AReduce_ws.delete()

    def test_Vanadium(self):
        HB2AReduce_ws = HB2AReduce('HB2A_exp0666_scan0024.dat',
                                   Vanadium='HB2A_exp0644_scan0018.dat')
        self.assertTrue(HB2AReduce_ws)
        self.assertEqual(HB2AReduce_ws.getNumberHistograms(), 1)
        self.assertEqual(HB2AReduce_ws.blocksize(), 2439)
        self.assertEqual(np.argmax(HB2AReduce_ws.extractY()), 2203)
        self.assertAlmostEqual(np.max(HB2AReduce_ws.extractY()), 78.4374673238)
        HB2AReduce_ws.delete()

    def test_ExcludeDetectors(self):
        HB2AReduce_ws = HB2AReduce('HB2A_exp0666_scan0024.dat',
                                   ExcludeDetectors='1-20,40-42')
        self.assertTrue(HB2AReduce_ws)
        self.assertEqual(HB2AReduce_ws.getNumberHistograms(), 1)
        self.assertEqual(HB2AReduce_ws.blocksize(), 1360)
        self.assertEqual(np.argmax(HB2AReduce_ws.extractY()), 283)
        self.assertAlmostEqual(np.max(HB2AReduce_ws.extractY()), 0.8336013246)
        HB2AReduce_ws.delete()
+
+
# Allow running this test file directly outside the CMake test harness
if __name__ == '__main__':
    unittest.main()
diff --git a/Testing/Data/UnitTest/HB2A_exp0644_scan0018.dat.md5 b/Testing/Data/UnitTest/HB2A_exp0644_scan0018.dat.md5
new file mode 100644
index 0000000000000000000000000000000000000000..0c7a88433bcafb5fed3e0d84be777147e014f711
--- /dev/null
+++ b/Testing/Data/UnitTest/HB2A_exp0644_scan0018.dat.md5
@@ -0,0 +1 @@
+df52975c9ae89b62ca4d6d9e51e819a3
diff --git a/Testing/Data/UnitTest/HB2A_exp0666__Ge_113_IN_vcorr.txt.md5 b/Testing/Data/UnitTest/HB2A_exp0666__Ge_113_IN_vcorr.txt.md5
new file mode 100644
index 0000000000000000000000000000000000000000..a2b3ef633209e966fc5f1429115337fb70c14af5
--- /dev/null
+++ b/Testing/Data/UnitTest/HB2A_exp0666__Ge_113_IN_vcorr.txt.md5
@@ -0,0 +1 @@
+7fbf5aa9d9eba9b582ed028775cfad10
diff --git a/Testing/Data/UnitTest/HB2A_exp0666_scan0024.dat.md5 b/Testing/Data/UnitTest/HB2A_exp0666_scan0024.dat.md5
new file mode 100644
index 0000000000000000000000000000000000000000..a8c44ce64ec2238e99ef0b656344fdad77aa5c85
--- /dev/null
+++ b/Testing/Data/UnitTest/HB2A_exp0666_scan0024.dat.md5
@@ -0,0 +1 @@
+f8d9e802ce88872ccd0eb9e3ebb2a53a
diff --git a/Testing/Data/UnitTest/HB2A_exp0666_scan0025.dat.md5 b/Testing/Data/UnitTest/HB2A_exp0666_scan0025.dat.md5
new file mode 100644
index 0000000000000000000000000000000000000000..35bf56d35800fbb5faebfde318ca2e8177642792
--- /dev/null
+++ b/Testing/Data/UnitTest/HB2A_exp0666_scan0025.dat.md5
@@ -0,0 +1 @@
+7e1d3a9079b6c9e6b6783d067241ec1a
diff --git a/docs/source/algorithms/HB2AReduce-v1.rst b/docs/source/algorithms/HB2AReduce-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e6de3a6de2cfd578676ef20622a2e123b90986cb
--- /dev/null
+++ b/docs/source/algorithms/HB2AReduce-v1.rst
@@ -0,0 +1,154 @@
+.. algorithm::
+
+.. summary::
+
+.. relatedalgorithms::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm reduces HFIR POWDER (HB-2A) data.
+
+You can either specify the filenames of data you want to reduce or provide the IPTS, exp and scan number. *e.g.* the following are equivalent:
+
+.. code-block:: python
+
+   ws = HB2AReduce('/HFIR/HB2A/IPTS-21073/exp666/Datafiles/HB2A_exp0666_scan0024.dat')
+   # and
+   ws = HB2AReduce(IPTS=21073, exp=666, ScanNumbers=24)
+
+You can specify any number of filenames or scan numbers (in a comma separated list).
+
+Vanadium
+########
+
+By default the correct vcorr file (``HB2A_exp???__Ge_[113|115]_[IN|OUT]_vcorr.txt``) adjacent to the data file will be used. Alternatively either the vcorr file or a vanadium scan file can be provided to the ``Vanadium`` option. If a vanadium scan file is provided then the vanadium counts can be taken into account when calculating the uncertainty, which cannot be done when using the vcorr file.
+
+If ``Normalise=False`` then no normalisation will be performed.
+
+ExcludeDetectors
+################
+
+By default the file ``HB2A_exp???__exclude_detectors.txt`` adjacent to the data file will be used, unless a list of detectors to exclude is provided by ``ExcludeDetectors``.
+
+IndividualDetectors
+###################
+
+If this option is True then a separate spectrum will be created in the output workspace for every anode. This allows you to compare adjacent anodes.
+
+Binning Data
+############
+
+If ``BinData=True`` (default) then the data will be binned on a regular grid with a width of ``BinWidth``. The output can be scaled by an arbitrary amount by setting ``Scale``.
+
+Saving reduced data
+###################
+
+The output workspace can be saved to ``XYE``, ``Maud`` and ``TOPAS`` format using :ref:`SaveFocusedXYE <algm-SaveFocusedXYE>`. *e.g.*
+
+.. code-block:: python
+
+   # XYE with no header
+   SaveFocusedXYE(ws, Filename='data.xye', SplitFiles=False, IncludeHeader=False)
+
+   # TOPAS format
+   SaveFocusedXYE(ws, Filename='data.xye', SplitFiles=False, Format='TOPAS')
+
+   # Maud format
+   SaveFocusedXYE(ws, Filename='data.xye', SplitFiles=False, Format='MAUD')
+
+Usage
+-----
+
+**Individual Detectors**
+
+.. code-block:: python
+
+   ws=HB2AReduce('HB2A_exp0666_scan0024.dat', IndividualDetectors=True)
+
+   # Plot anodes 40, 41 and 42
+   import matplotlib.pyplot as plt
+   from mantid import plots
+   fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
+   for num in [40,41,42]:
+       ax.plot(ws, specNum=num)
+   plt.legend()
+   #fig.savefig('HB2AReduce_1.png')
+   plt.show()
+
+.. figure:: /images/HB2AReduce_1.png
+
+
+**Unbinned data**
+
+.. code-block:: python
+
+   ws=HB2AReduce('HB2A_exp0666_scan0024.dat', BinData=False)
+
+   # Plot
+   import matplotlib.pyplot as plt
+   from mantid import plots
+   fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
+   ax.plot(ws)
+   #fig.savefig('HB2AReduce_2.png')
+   plt.show()
+
+.. figure:: /images/HB2AReduce_2.png
+
+
+**Binned data**
+
+.. code-block:: python
+
+   ws=HB2AReduce('HB2A_exp0666_scan0024.dat')
+
+   # Plot
+   import matplotlib.pyplot as plt
+   from mantid import plots
+   fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
+   ax.plot(ws)
+   #fig.savefig('HB2AReduce_3.png')
+   plt.show()
+
+.. figure:: /images/HB2AReduce_3.png
+
+
+**Exclude detectors: 1-20,40,41,42**
+
+.. code-block:: python
+
+   ws=HB2AReduce('HB2A_exp0666_scan0024.dat', ExcludeDetectors='1-20,40,41,42')
+
+   # Plot
+   import matplotlib.pyplot as plt
+   from mantid import plots
+   fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
+   ax.plot(ws)
+   #fig.savefig('HB2AReduce_4.png')
+   plt.show()
+
+.. figure:: /images/HB2AReduce_4.png
+
+
+**Combining multiple files**
+
+.. code-block:: python
+
+   ws=HB2AReduce('HB2A_exp0666_scan0024.dat, HB2A_exp0666_scan0025.dat')
+
+   # Plot
+   import matplotlib.pyplot as plt
+   from mantid import plots
+   fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
+   ax.plot(ws)
+   #fig.savefig('HB2AReduce_5.png')
+   plt.show()
+
+.. figure:: /images/HB2AReduce_5.png
+
+
+.. categories::
+
+.. sourcelink::
diff --git a/docs/source/images/HB2AReduce_1.png b/docs/source/images/HB2AReduce_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..fc0122f6ddbf704601e197bc451cc9df53421a04
Binary files /dev/null and b/docs/source/images/HB2AReduce_1.png differ
diff --git a/docs/source/images/HB2AReduce_2.png b/docs/source/images/HB2AReduce_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..eeff3c3378f16f95aa1efb3fdb67a6cc935ac87e
Binary files /dev/null and b/docs/source/images/HB2AReduce_2.png differ
diff --git a/docs/source/images/HB2AReduce_3.png b/docs/source/images/HB2AReduce_3.png
new file mode 100644
index 0000000000000000000000000000000000000000..5d04dd73beee6d6302afae3b31b5e08a664ea2d0
Binary files /dev/null and b/docs/source/images/HB2AReduce_3.png differ
diff --git a/docs/source/images/HB2AReduce_4.png b/docs/source/images/HB2AReduce_4.png
new file mode 100644
index 0000000000000000000000000000000000000000..e4e404c6cf5620490282ee8f6eefa24d7706d70d
Binary files /dev/null and b/docs/source/images/HB2AReduce_4.png differ
diff --git a/docs/source/images/HB2AReduce_5.png b/docs/source/images/HB2AReduce_5.png
new file mode 100644
index 0000000000000000000000000000000000000000..1898e10fd69753cb7004ae2eebcaf1f566b837f2
Binary files /dev/null and b/docs/source/images/HB2AReduce_5.png differ
diff --git a/docs/source/release/v3.14.0/diffraction.rst b/docs/source/release/v3.14.0/diffraction.rst
index e4949e72987e2c0e3917c93c4e698ab4c309d043..7b75fb4f6e4214698850f3bf7582b02353828c0b 100644
--- a/docs/source/release/v3.14.0/diffraction.rst
+++ b/docs/source/release/v3.14.0/diffraction.rst
@@ -34,8 +34,6 @@ Improvements
 - :ref:`LoadWAND <algm-LoadWAND>` has grouping option added and loads faster
 - Mask workspace option added to :ref:`WANDPowderReduction <algm-WANDPowderReduction>`
 
-:ref:`Release 3.14.0 <v3.14.0>`
-
 
 Single Crystal Diffraction
 --------------------------
@@ -55,10 +53,13 @@ Bugfixes
 
 - :ref:`FindPeaksMD <algm-FindPeaksMD>` now finds peaks correctly with the crystallography convention setting and reduction with crystallography convention is tested with a system test.
 
-Total Scattering
-----------------
+Powder Diffraction
+------------------
 
-Improvements
-############
+New
+###
 
+- :ref:`HB2AReduce <algm-HB2AReduce>` algorithm reduces HFIR POWDER (HB-2A) data
 - :ref:`LoadGudrunOutput <algm-LoadGudrunOutput>` is a new algorithm that allows users to load the standard Gudrun output files into Mantid.
+
+:ref:`Release 3.14.0 <v3.14.0>`