diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSDarkRunBackgroundCorrection.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSDarkRunBackgroundCorrection.py
new file mode 100644
index 0000000000000000000000000000000000000000..158715eded83546d3009f12cdc8921209bcbaa88
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSDarkRunBackgroundCorrection.py
@@ -0,0 +1,441 @@
+#pylint: disable=no-init,invalid-name,too-many-locals,too-many-branches
+from mantid.simpleapi import *
+from mantid.kernel import *
+from mantid.api import *
+import numpy as np
+
+class SANSDarkRunBackgroundCorrection(PythonAlgorithm):
+    def category(self):
+        return "Workflow\\SANS\\UsesPropertyManager"
+
+    def name(self):
+        return "SANSDarkRunBackgroundCorrection"
+
+    def summary(self):
+        return "Correct SANS data with a dark run measurement."
+
+    def PyInit(self):
+        self.declareProperty(MatrixWorkspaceProperty("InputWorkspace", "",
+                                                     validator=CommonBinsValidator(),
+                                                     direction=Direction.Input))
+        self.declareProperty(MatrixWorkspaceProperty("DarkRun", "",
+                                                     validator=CommonBinsValidator(),
+                                                     direction=Direction.Input))
+        self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", "",
+                                                     direction = Direction.Output),
+                                                     "The corrected SANS workspace.")
+        self.declareProperty("NormalizationRatio", 1.0, "Number to scale the dark run in order"
+                                                       "to make it comparable to the SANS run")
+        self.declareProperty("Mean", False, "If True then a mean value of all spectra is used to "
+                                             "calculate the value to subtract")
+        self.declareProperty("Uniform", True, "If True then we treat the treat the tim ebins a")
+        self.declareProperty("ApplyToDetectors", True, "If True then we apply the correction to the detector pixels")
+        self.declareProperty("ApplyToMonitors", False, "If True then we apply the correction to the monitors")
+
+        arrvalidator = IntArrayBoundedValidator()
+        arrvalidator.setLower(0)
+        self.declareProperty(IntArrayProperty("SelectedMonitors", values=[],
+                                              validator=arrvalidator,
+                                              direction=Direction.Input),
+                             "List of selected detector IDs of monitors to which the "
+                             "correction should be applied. If empty, all monitors will "
+                             "be corrected, if ApplyToMonitors has been selected.")
+
+    def PyExec(self):
+        # Get the workspaces
+        workspace = self.getProperty("InputWorkspace").value
+        dark_run = self.getProperty("DarkRun").value
+        dummy_output_ws_name = self.getPropertyValue("OutputWorkspace")
+
+        # Provide progress reporting
+        progress = Progress(self, 0, 1, 4)
+
+        # Get other properties
+        do_mean = self.getProperty("Mean").value
+        do_uniform = self.getProperty("Uniform").value
+        normalization_ratio = self.getProperty("NormalizationRatio").value
+        progress.report("SANSDarkRunBackgroundCorrection: Preparing the dark run for background correction...")
+
+        dark_run_normalized = None
+        # Apply normalization. Uniform means that the dark run signal was accumulated evenly over the
+        # measurement time, with no particular spikes expected, so it can be spread evenly over the bins.
+        # In the non-uniform case we assume that it matters when the data was taken, so the dark run is
+        # rebinned to the SANS data and subtracted bin by bin.
+        if do_uniform:
+            dark_run_normalized = self._prepare_uniform_correction(workspace = workspace,
+                                                                   dark_run = dark_run,
+                                                                   normalization_ratio = normalization_ratio,
+                                                                   do_mean = do_mean)
+        else:
+            dark_run_normalized = self._prepare_non_uniform_correction(workspace = workspace,
+                                                                       dark_run = dark_run,
+                                                                       normalization_ratio = normalization_ratio)
+
+        progress.report("SANSDarkRunBackgroundCorrection: Removing unwanted detectors...")
+        # Remove the detectors which are unwanted
+        dark_run_normalized = self._remove_unwanted_detectors_and_monitors(dark_run_normalized)
+
+        progress.report("SANSDarkRunBackgroundCorrection: Subtracting the background...")
+        # Subtract the normalized dark run from the SANS workspace
+        output_ws = self._subtract_dark_run_from_sans_data(workspace, dark_run_normalized)
+
+        self.setProperty("OutputWorkspace", output_ws)
+
+    def validateInputs(self):
+        issues = dict()
+
+        # Either the detectors and/or the monitors need to be selected
+        applyToDetectors = self.getProperty("ApplyToDetectors").value
+        applyToMonitors = self.getProperty("ApplyToMonitors").value
+
+        if not applyToDetectors and not applyToMonitors:
+            error_msg = 'Must provide either ApplyToDetectors or ApplyToMonitors or both'
+            issues['ApplyToDetectors'] = error_msg
+
+        # We only allow Workspace2D, i.e. not IEventWorkspaces
+        ws1 = self.getProperty("InputWorkspace").value
+        ws2 = self.getProperty("DarkRun").value
+
+        if isinstance(ws1, IEventWorkspace):
+            error_msg = 'The InputWorkspace must be a Workspace2D.'
+            issues["InputWorkspace"] = error_msg
+
+        if isinstance(ws2, IEventWorkspace):
+            error_msg = 'The DarkRun workspace must be a Workspace2D.'
+            issues["DarkRun"] = error_msg
+
+        return issues
+
+    def _subtract_dark_run_from_sans_data(self, workspace, dark_run):
+        # Subtract the dark_run from the workspace
+        subtracted_ws_name = "_dark_run_corrected_ws"
+        alg_minus = AlgorithmManager.createUnmanaged("Minus")
+        alg_minus.initialize()
+        alg_minus.setChild(True)
+        alg_minus.setProperty("LHSWorkspace", workspace)
+        alg_minus.setProperty("RHSWorkspace", dark_run)
+        alg_minus.setProperty("OutputWorkspace", subtracted_ws_name)
+        alg_minus.execute()
+        return alg_minus.getProperty("OutputWorkspace").value
+
+    def _prepare_non_uniform_correction(self, workspace, dark_run, normalization_ratio):
+        # Make sure that the binning is the same for the scattering data and the dark run
+        dark_run = self._get_cloned(dark_run)
+        dark_run = self._rebin_dark_run(dark_run, workspace)
+        # Scale with the normalization factor
+        return self._scale_dark_run(dark_run, normalization_ratio)
+
+    def _rebin_dark_run(self, dark_run, workspace):
+        dark_run_rebin_name = "_dark_run_rebinned"
+        alg_rebin = AlgorithmManager.createUnmanaged("RebinToWorkspace")
+        alg_rebin.initialize()
+        alg_rebin.setChild(True)
+        alg_rebin.setProperty("WorkspaceToRebin", dark_run)
+        alg_rebin.setProperty("WorkspaceToMatch", workspace)
+        alg_rebin.setProperty("OutputWorkspace", dark_run_rebin_name)
+        alg_rebin.execute()
+        return alg_rebin.getProperty("OutputWorkspace").value
+
+    def _get_cloned(self, dark_run):
+        dark_run_clone_name = dark_run.name() + "_cloned"
+        alg_clone = AlgorithmManager.createUnmanaged("CloneWorkspace")
+        alg_clone.initialize()
+        alg_clone.setChild(True)
+        alg_clone.setProperty("InputWorkspace", dark_run)
+        alg_clone.setProperty("OutputWorkspace", dark_run_clone_name)
+        alg_clone.execute()
+        return alg_clone.getProperty("OutputWorkspace").value
+
+    def _prepare_uniform_correction(self, workspace, dark_run, normalization_ratio, do_mean):
+        # First we need to integrate the dark_run, i.e. sum up all bins of each spectrum
+        dark_run_integrated = self._integrate_dark_run(dark_run)
+
+        # If the mean of all detectors is required then we need to average them as well
+        if do_mean:
+            dark_run_integrated = self._perform_average_over_all_pixels(dark_run_integrated)
+
+        # The workspace needs to be scaled to match the SANS data. This is done by the normalization_factor
+        # In addition we need to spread the integrated signal evenly over all bins of the SANS data set.
+        # Note that we assume here a workspace with common bins.
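+        # For example, a normalization_ratio of 0.5 and 100 bins in the SANS data give a scale
+        # factor of 0.005, i.e. 0.5% of the integrated dark signal is subtracted from every bin.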
+        num_bins = len(workspace.dataY(0))
+        scale_factor = normalization_ratio/float(num_bins)
+
+        return self._scale_dark_run(dark_run_integrated, scale_factor)
+
+    def _integrate_dark_run(self, dark_run):
+        '''
+        Sum up all bins for each pixel
+        @param dark_run: a bare dark run
+        @returns an integrated dark run
+        '''
+        dark_run_integrated_name = "_dark_run_integrated"
+        alg_integrate = AlgorithmManager.createUnmanaged("Integration")
+        alg_integrate.initialize()
+        alg_integrate.setChild(True)
+        alg_integrate.setProperty("InputWorkspace", dark_run)
+        alg_integrate.setProperty("OutputWorkspace", dark_run_integrated_name)
+        alg_integrate.execute()
+        return alg_integrate.getProperty("OutputWorkspace").value
+
+    def _scale_dark_run(self, dark_run, scale_factor):
+        '''
+        Scales the dark run.
+        @param dark_run: The dark run to be scaled
+        @param scale_factor: The scaling factor
+        @returns a scaled dark run
+        '''
+        dark_run_scaled_name = "_dark_run_scaled"
+        alg_scale  = AlgorithmManager.createUnmanaged("Scale")
+        alg_scale.initialize()
+        alg_scale.setChild(True)
+        alg_scale.setProperty("InputWorkspace", dark_run)
+        alg_scale.setProperty("OutputWorkspace", dark_run_scaled_name)
+        alg_scale.setProperty("Operation", "Multiply")
+        alg_scale.setProperty("Factor", scale_factor)
+        alg_scale.execute()
+        return alg_scale.getProperty("OutputWorkspace").value
+
+    def _perform_average_over_all_pixels(self, dark_run_integrated):
+        '''
+        At this point we expect a dark run workspace with one entry for each pixel, i.e.
+        after integration. The average value over all pixels is calculated and replaces
+        the current value of each pixel.
+        @param dark_run_integrated: a dark run with integrated pixels
+        @returns an averaged, integrated dark run
+        '''
+        dark_run_summed_name= "_summed_spectra"
+        alg_sum  = AlgorithmManager.createUnmanaged("SumSpectra")
+        alg_sum.initialize()
+        alg_sum.setChild(True)
+        alg_sum.setProperty("InputWorkspace",  dark_run_integrated)
+        alg_sum.setProperty("OutputWorkspace", dark_run_summed_name)
+        alg_sum.execute()
+        dark_run_summed = alg_sum.getProperty("OutputWorkspace").value
+
+        # Get the single value out of the summed workspace and divide it
+        # by the number of pixels
+        summed_value = dark_run_summed.dataY(0)[0]
+        num_pixels = dark_run_integrated.getNumberHistograms()
+        averaged_value = summed_value/float(num_pixels)
+
+        # Apply the averaged value to all pixels. Set values to unity. Don't
+        # divide workspaces as this will alter the y unit.
+        for index in range(0, dark_run_integrated.getNumberHistograms()):
+            dark_run_integrated.dataY(index)[0] = 1.0
+            dark_run_integrated.dataE(index)[0] = 1.0
+
+        # Now that we have a unity workspace multiply with the unit value
+        return self._scale_dark_run(dark_run_integrated, averaged_value)
+
+    def _remove_unwanted_detectors_and_monitors(self, dark_run):
+        # If we want both the monitors and the detectors, then we don't have to do anything
+        applyToDetectors = self.getProperty("ApplyToDetectors").value
+        applyToMonitors = self.getProperty("ApplyToMonitors").value
+        selected_monitors = self.getProperty("SelectedMonitors").value
+
+        detector_cleaned_dark_run = None
+        remover = DarkRunMonitorAndDetectorRemover()
+        if applyToDetectors and applyToMonitors and len(selected_monitors) == 0:
+            # If the user wants everything, then we don't have to do anything here
+            detector_cleaned_dark_run = dark_run
+        elif applyToDetectors and not applyToMonitors:
+            # We want to set the monitors to 0
+            detector_cleaned_dark_run = remover.set_pure_detector_dark_run(dark_run)
+        elif applyToMonitors and not applyToDetectors:
+            # We want to set the detectors to 0
+            detector_cleaned_dark_run = remover.set_pure_monitor_dark_run(dark_run, selected_monitors)
+        elif applyToDetectors and applyToMonitors and len(selected_monitors) > 0:
+            # We only want to set to 0 those monitors which have not been specifically selected
+            detector_cleaned_dark_run = remover.set_mixed_monitor_detector_dark_run(dark_run, selected_monitors)
+        else:
+            raise RuntimeError("SANSDarkRunBackgroundCorrection: Must provide either "
+                               "ApplyToDetectors or ApplyToMonitors or both")
+
+        return detector_cleaned_dark_run
+
+
+class DarkRunMonitorAndDetectorRemover(object):
+    '''
+    This class can set detectors or monitors to 0. Either all monitors can be selected or only
+    a specific subset of them.
+    '''
+    def __init__(self):
+        super(DarkRunMonitorAndDetectorRemover, self).__init__()
+
+    def set_pure_detector_dark_run(self, dark_run):
+        '''
+        Sets all monitors on the dark run workspace to 0.
+        @param dark_run: the dark run workspace
+        '''
+        # Get the list of monitor workspace indices
+        monitor_list = self.find_monitor_workspace_indices(dark_run)
+
+        # Since we only have around 10 or so monitors
+        # we set them manually to 0
+        for ws_index, dummy_det_id in monitor_list:
+            data = dark_run.dataY(ws_index)
+            error = dark_run.dataE(ws_index)
+            data = data*0
+            error = error*0
+            dark_run.setY(ws_index,data)
+            dark_run.setE(ws_index,error)
+
+        return dark_run
+
+    def find_monitor_workspace_indices(self, dark_run):
+        '''
+        Finds all monitor workspace indices
+        @param dark_run: the dark run workspace
+        @returns a zipped list of workspace indices and detector IDs
+        '''
+        monitor_list = []
+        det_id_list = []
+        # pylint: disable=bare-except
+        try:
+            num_histograms = dark_run.getNumberHistograms()
+            for index in range(0, num_histograms):
+                det = dark_run.getDetector(index)
+                if det.isMonitor():
+                    det_id_list.append(det.getID())
+                    monitor_list.append(index)
+        except:
+            Logger("DarkRunMonitorAndDetectorRemover").information("There was an issue when trying "
+                                                                   "to extract the monitor list from workspace")
+        return list(zip(monitor_list, det_id_list))
+
+    def set_pure_monitor_dark_run(self, dark_run, monitor_selection):
+        '''
+        We copy the monitors, set everything to 0 and reset the monitors.
+        Since there are only a few monitors, this should not be very costly.
+        @param dark_run: the dark run
+        @param monitor_selection: the monitors which are selected
+        @raise RuntimeError: If the selected monitor workspace index does not exist.
+        '''
+        # Get the list of monitor workspace indices
+        monitor_list = self.find_monitor_workspace_indices(dark_run)
+
+        # Get the monitor selection
+        selected_monitors = self._get_selected_monitors(monitor_selection, monitor_list)
+
+        # Grab the monitor Y and E values
+        list_dataY, list_dataE = self._get_monitor_values(dark_run, monitor_list)
+
+        # Set everything to 0
+        scale_factor = 0.0
+        dark_run_scaled_name = "dark_run_scaled"
+
+        alg_scale  = AlgorithmManager.createUnmanaged("Scale")
+        alg_scale.initialize()
+        alg_scale.setChild(True)
+        alg_scale.setProperty("InputWorkspace", dark_run)
+        alg_scale.setProperty("OutputWorkspace", dark_run_scaled_name)
+        alg_scale.setProperty("Operation", "Multiply")
+        alg_scale.setProperty("Factor", scale_factor)
+        alg_scale.execute()
+        dark_run = alg_scale.getProperty("OutputWorkspace").value
+
+        # Reset the monitors which are required. Either we reset all monitors
+        # or only a specific set of monitors which was selected by the user.
+        if len(selected_monitors) > 0:
+            dark_run = self._set_only_selected_monitors(dark_run, list_dataY, list_dataE,
+                                                        monitor_list, selected_monitors)
+        else:
+            dark_run = self._set_all_monitors(dark_run, list_dataY,
+                                              list_dataE, monitor_list)
+        return dark_run
+
+    def set_mixed_monitor_detector_dark_run(self, dark_run, monitor_selection):
+        '''
+        We only set to 0 the monitors which are not specifically listed
+        @param dark_run: the dark run
+        @param monitor_selection: the monitors which are selected
+        @raise RuntimeError: If the selected monitor workspace index does not exist.
+        '''
+        # Get the list of monitor workspace indices
+        monitor_list = self.find_monitor_workspace_indices(dark_run)
+
+        # Get the monitor selection
+        selection = self._get_selected_monitors(monitor_selection, monitor_list)
+
+        # Grab the monitor Y and E values
+        list_dataY, list_dataE = self._get_monitor_values(dark_run, monitor_list)
+
+        # Now set the monitors to zero and leave the detectors as they are
+        dark_run = self.set_pure_detector_dark_run(dark_run)
+
+        # Reset the selected monitors
+        return self._set_only_selected_monitors(dark_run, list_dataY, list_dataE,
+                                                monitor_list, selection)
+
+    def _get_selected_monitors(self, monitor_selection, monitor_list):
+        '''
+        Checks and gets the monitor selection, i.e. checks for sanity and removes duplicates
+        @param monitor_selection: the monitors which are selected
+        @param monitor_list: the list of monitors
+        @raise RuntimeError: If the selected monitor workspace index does not exist.
+        '''
+        det_id_list = []
+        if len(monitor_list) != 0:
+            det_id_list = list(zip(*monitor_list))[1]
+
+        selected_monitors = []
+        if len(monitor_selection) > 0:
+            selected_monitors = set(monitor_selection)
+            if not selected_monitors.issubset(set(det_id_list)):
+                raise RuntimeError("DarkRunMonitorAndDetectorRemover: "
+                                   "The selected monitors are not part of the workspace. "
+                                   "Make sure you have selected a monitor workspace index "
+                                   "which is part of the workspace")
+        return selected_monitors
+
+    def _get_monitor_values(self, dark_run, monitor_list):
+        '''
+        Gets the Y and E values of the monitors of the dark run
+        @param dark_run: the dark run
+        @param monitor_list: the list of monitors
+        @returns one array with y values and one array with e values
+        '''
+        list_dataY = []
+        list_dataE = []
+        for ws_index, dummy_det_id in monitor_list:
+            list_dataY.append(np.copy(dark_run.dataY(ws_index)))
+            list_dataE.append(np.copy(dark_run.dataE(ws_index)))
+        return list_dataY, list_dataE
+
+    def _set_all_monitors(self, dark_run, list_dataY, list_dataE, monitor_list):
+        '''
+        We reset all monitors back to the old values
+        @param dark_run: the dark run workspace
+        @param list_dataY: the old Y data
+        @param list_dataE: the old E data
+        @param monitor_list: a collection of monitors
+        @returns the reset dark run workspace
+        '''
+        counter = 0
+        for ws_index, dummy_det_id in monitor_list:
+            dark_run.setY(ws_index, list_dataY[counter])
+            dark_run.setE(ws_index, list_dataE[counter])
+            counter += 1
+        return dark_run
+    #pylint: disable=too-many-arguments
+    def _set_only_selected_monitors(self, dark_run, list_dataY, list_dataE,
+                                    monitor_list, selected_monitors):
+        '''
+        Resets individual monitors
+        @param dark_run: the dark run workspace
+        @param list_dataY: the old Y data
+        @param list_dataE: the old E data
+        @param monitor_list: a collection of monitors
+        @param selected_monitors: a collection of monitors which need to be reset
+        @returns the reset dark run workspace
+        '''
+        # The selected monitors are specified as detector IDs, hence we need to compare against
+        # the detector ID, but we use the associated workspace index to correct the data
+        counter = 0
+        for ws_index, det_id in monitor_list:
+            # Only add the data back for the specified monitors
+            if det_id in selected_monitors:
+                dark_run.setY(ws_index, list_dataY[counter])
+                dark_run.setE(ws_index, list_dataE[counter])
+            counter += 1
+        return dark_run
+#############################################################################################
+
+AlgorithmFactory.subscribe(SANSDarkRunBackgroundCorrection)
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt b/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
index 55e026cbad4bfd8559b1fa46f352a20ad42275ae..32cbdf451aebfba9a1c010c76142112e9c9f4e40 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
@@ -80,6 +80,7 @@ set ( TEST_PY_FILES
   SwapWidthsTest.py
   SymmetriseTest.py
   UpdatePeakParameterTableValueTest.py
+  SANSDarkRunBackgroundCorrectionTest.py
   SANSSubtractTest.py
   TimeSliceTest.py
   TOFTOFConvertTofToDeltaETest.py
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/SANSDarkRunBackgroundCorrectionTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/SANSDarkRunBackgroundCorrectionTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3ff74af98c27c78a94e0832de2ddaba6007cf04
--- /dev/null
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/SANSDarkRunBackgroundCorrectionTest.py
@@ -0,0 +1,763 @@
+import unittest
+
+from mantid.kernel import *
+from mantid.api import *
+from testhelpers import run_algorithm
+import numpy as np
+from SANSDarkRunBackgroundCorrection import DarkRunMonitorAndDetectorRemover
+from SANSDarkRunBackgroundCorrection import SANSDarkRunBackgroundCorrection
+
+class SANSDarkRunBackgroundCorrectionTest(unittest.TestCase):
+    #-----
+    # Workspace2D tests
+
+    def test_dark_run_correction_with_uniform_and_not_mean_for_workspace2D_input(self):
+        # Arrange
+        spectra = 4
+        bin_boundaries_scatter = 5
+        y_value_scatter_run = 4.
+        e_value_scatter_run = 1.
+        name_scatter = "_scatter_SANS_test"
+        self._provide_workspace2D(bin_boundaries_scatter, y_value_scatter_run,
+                                  e_value_scatter_run,name_scatter, spectra)
+
+        bin_boundaries_dark_run = 20
+        y_value_dark_run = 0.3
+        e_value_dark_run = 0.
+        name_dark_run = "_dark_run_SANS_test"
+        self._provide_workspace2D(bin_boundaries_dark_run, y_value_dark_run,
+                                  e_value_dark_run,name_dark_run, spectra)
+        # Algorithm configuration
+        mean = False
+        uniform = True
+        normalization_ratio = 0.5
+
+        # Act
+        out_ws_name = "out_test"
+        alg = run_algorithm(
+                    'SANSDarkRunBackgroundCorrection',
+                    InputWorkspace= name_scatter,
+                    DarkRun = name_dark_run,
+                    Mean = mean,
+                    Uniform =uniform,
+                    NormalizationRatio=normalization_ratio,
+                    OutputWorkspace = out_ws_name,
+                    ApplyToDetectors = True,
+                    ApplyToMonitors = False,
+                    SelectedMonitors = [],
+                    rethrow = True)
+
+        # Assert
+        # We should sum up all bins in the dark run (all y values, hence bin_boundaries_dark_run - 1),
+        # then multiply by the normalization ratio
+        # and divide by the number of bins in the scatterer.
+        expected_integration = y_value_dark_run* float(bin_boundaries_dark_run - 1)
+        expected_correction_value = (normalization_ratio*expected_integration/float(bin_boundaries_scatter - 1))
+
+        self.assertTrue(AnalysisDataService.doesExist(out_ws_name))
+        self._check_output_workspace(mtd[name_scatter],
+                                     mtd[out_ws_name],
+                                     expected_correction_value)
+
+        # Clean up
+        ws_to_clean = [out_ws_name, name_dark_run, name_scatter]
+        self._clean_up(ws_to_clean)
+
+    def test_dark_run_correction_with_uniform_and_mean_for_workspace2D_input(self):
+        # Arrange
+        spectra = 4
+
+        bin_boundaries_scatter = 5
+        y_value_scatter_run = 4.
+        e_value_scatter_run = 1.
+        name_scatter = "_scatter_SANS_test"
+        self._provide_workspace2D(bin_boundaries_scatter, y_value_scatter_run,
+                                    e_value_scatter_run,name_scatter, spectra)
+
+        bin_boundaries_dark_run = 20
+        y_value_spectra_even_dark_run = [0.3 for element in range(bin_boundaries_dark_run - 1)]
+        y_value_spectra_odd_dark_run = [0.2 for element in range(bin_boundaries_dark_run - 1)]
+        y_value_dark_run = (y_value_spectra_even_dark_run + y_value_spectra_odd_dark_run +
+                            y_value_spectra_even_dark_run + y_value_spectra_odd_dark_run)
+        e_value_dark_run = 0
+        name_dark_run = "_dark_run_SANS_test"
+        self._provide_workspace2D(bin_boundaries_dark_run, y_value_dark_run,
+                                                e_value_dark_run,name_dark_run, spectra, True)
+        # Algorithm configuration
+        mean = True
+        uniform = True
+        normalization_ratio = 0.5
+
+        # Act
+        out_ws_name = "out_test"
+        alg = run_algorithm(
+                    'SANSDarkRunBackgroundCorrection',
+                    InputWorkspace= name_scatter,
+                    DarkRun = name_dark_run,
+                    Mean = mean,
+                    Uniform =uniform,
+                    NormalizationRatio=normalization_ratio,
+                    OutputWorkspace = out_ws_name,
+                    ApplyToDetectors = True,
+                    ApplyToMonitors = False,
+                    SelectedMonitors = [],
+                    rethrow = True)
+
+        # Assert
+        # We should sum up all bins in the dark run and average over all spectra (Mean = True),
+        # then multiply by the normalization ratio
+        # and divide by the number of bins in the scatterer.
+        expected_integration = sum(y_value_dark_run)/float(mtd[name_dark_run].getNumberHistograms())
+        expected_correction_value = (normalization_ratio*expected_integration/float(bin_boundaries_scatter - 1))
+
+        self.assertTrue(AnalysisDataService.doesExist(out_ws_name))
+        self._check_output_workspace(mtd[name_scatter],
+                                     mtd[out_ws_name],
+                                     expected_correction_value)
+
+        # Clean up
+        ws_to_clean = [out_ws_name, name_dark_run, name_scatter]
+        self._clean_up(ws_to_clean)
+
+    def test_dark_run_correction_with_non_uniform_and_not_mean_for_workspace2D_input(self):
+        # Arrange
+        spectra = 4
+
+        bin_boundaries= 5
+        y_value_scatter_run = spectra*[element for element in range(bin_boundaries-1)]
+        e_value_scatter_run = 1.
+        name_scatter = "_scatter_SANS_test"
+        self._provide_workspace2D(bin_boundaries, y_value_scatter_run,
+                                    e_value_scatter_run,name_scatter, spectra, True)
+
+        y_value_dark_run = spectra*[element*0.2 for element in range(bin_boundaries - 1)]
+        e_value_dark_run = 0
+        name_dark_run = "_dark_run_SANS_test"
+        self._provide_workspace2D(bin_boundaries, y_value_dark_run,
+                                  e_value_dark_run, name_dark_run, spectra, True)
+        # Algorithm configuration
+        mean = False
+        uniform = False
+        normalization_ratio = 0.6
+
+        # Act
+        out_ws_name = "out_test"
+        alg = run_algorithm(
+                    'SANSDarkRunBackgroundCorrection',
+                    InputWorkspace= name_scatter,
+                    DarkRun = name_dark_run,
+                    Mean = mean,
+                    Uniform =uniform,
+                    NormalizationRatio=normalization_ratio, 
+                    OutputWorkspace = out_ws_name,
+                    ApplyToDetectors = True,
+                    ApplyToMonitors = False,
+                    SelectedMonitors = [],
+                    rethrow = True)
+        
+        # Assert
+        # In the non-uniform case the dark run is rebinned to match the scatter workspace,
+        # scaled by the normalization ratio and then subtracted bin by bin.
+        expected_correction_value = normalization_ratio
+        self.assertTrue(AnalysisDataService.doesExist(out_ws_name))
+        self._check_output_workspace_non_uniform(mtd[name_scatter],
+                                                 mtd[out_ws_name],
+                                                 mtd[name_dark_run],
+                                                 expected_correction_value)
+
+        # Clean up
+        ws_to_clean = [out_ws_name, name_dark_run, name_scatter]
+        self._clean_up(ws_to_clean)
+
+    def test_that_only_monitors_are_corrected_if_only_monitors_should_be_corrected(self):
+        # Arrange
+        monY_scatter = 1.
+        monE_scatter = 1.
+        dataY_scatter = 2.
+        dataE_scatter = 2.
+        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
+                                                        dataY_scatter, dataE_scatter,
+                                                        as_dark_run = False)
+        monY_dark = 3.
+        monE_dark = 3.
+        dataY_dark = 4.
+        dataE_dark = 4.
+        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
+                                                      dataY_dark, dataE_dark,
+                                                      as_dark_run = True)
+
+        mean = False
+        uniform = True
+        normalization_ratio = 0.6
+        applyToMonitors = True
+        applyToDetectors = False
+        out_ws_name = "out_test"
+        selected_monitor = []
+        # Act
+        ws = self._do_run_dark_subtraction(scatter_ws, dark_run, mean, uniform, normalization_ratio,
+                                      out_ws_name, applyToMonitors, applyToDetectors, selected_monitor)
+
+        # Assert
+        self.assertAlmostEquals(ws.getNumberHistograms(), scatter_ws.getNumberHistograms(), 5)
+
+        comparison = lambda data, expected : all([self.assertAlmostEqual(data[i], expected, 5, "Should be equal")
+                                                 for i in range(0, len(data))])
+
+        # Expected value for monitors
+        expected_monitor_Y = monY_scatter - monY_dark*len(dark_run.dataY(0))/len(scatter_ws.dataY(0))*normalization_ratio
+        comparison(ws.dataY(0), expected_monitor_Y)
+        comparison(ws.dataY(1), expected_monitor_Y)
+
+        # Expected value for detectors
+        expected_detector_Y = dataY_scatter
+        for index in range(2, ws.getNumberHistograms()):
+            comparison(ws.dataY(index), expected_detector_Y)
+
+    def test_that_individual_monitor_is_corrected_if_only_individual_monitor_is_chosen(self):
+        # Arrange
+        monY_scatter = 1.
+        monE_scatter = 1.
+        dataY_scatter = 2.
+        dataE_scatter = 2.
+        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
+                                                        dataY_scatter, dataE_scatter,
+                                                        as_dark_run = False)
+        monY_dark = 3.
+        monE_dark = 3.
+        dataY_dark = 4.
+        dataE_dark = 4.
+        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
+                                                      dataY_dark, dataE_dark,
+                                                      as_dark_run = True)
+
+        mean = False
+        uniform = True
+        normalization_ratio = 0.6
+        applyToMonitors = True
+        applyToDetectors = False
+        out_ws_name = "out_test"
+        selected_monitor = [2] 
+        # Act
+        ws = self._do_run_dark_subtraction(scatter_ws, dark_run, mean, uniform, normalization_ratio,
+                                      out_ws_name, applyToMonitors, applyToDetectors, selected_monitor)
+
+        # Assert
+        self.assertAlmostEquals(ws.getNumberHistograms(), scatter_ws.getNumberHistograms(), 5)
+
+        comparison = lambda data, expected : all([self.assertAlmostEqual(data[i], expected, 5, "Should be equal")
+                                                  for i in range(0, len(data))])
+
+        # Expected value for monitor 2 -- workspace index 1
+        expected_monitor_Y_1 = monY_scatter - monY_dark*len(dark_run.dataY(0))/len(scatter_ws.dataY(0))*normalization_ratio
+        comparison(ws.dataY(1), expected_monitor_Y_1)
+
+        # Expected value for monitor 1  -- workspace index 0
+        expected_monitor_Y_0 = monY_scatter
+        comparison(ws.dataY(0), expected_monitor_Y_0)
+
+        # Expected value for detectors
+        expected_detector_Y = dataY_scatter
+        for index in range(2, ws.getNumberHistograms()):
+            comparison(ws.dataY(index), expected_detector_Y)
+
+    def test_that_selecting_monitors_and_detectors_is_allowed(self):
+        # Arrange
+        monY_scatter = 1.
+        monE_scatter = 1.
+        dataY_scatter = 2.
+        dataE_scatter = 2.
+        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
+                                                        dataY_scatter, dataE_scatter,
+                                                        as_dark_run = False)
+        monY_dark = 3.
+        monE_dark = 3.
+        dataY_dark = 4.
+        dataE_dark = 4.
+        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
+                                                      dataY_dark, dataE_dark,
+                                                      as_dark_run = True)
+
+        mean = False
+        uniform = True
+        normalization_ratio = 0.6
+        applyToMonitors = True
+        applyToDetectors = True
+        out_ws_name = "out_test"
+        selected_monitor = []
+        # Act
+        ws = self._do_run_dark_subtraction(scatter_ws, dark_run, mean, uniform, normalization_ratio,
+                                      out_ws_name, applyToMonitors, applyToDetectors, selected_monitor)
+
+        # Assert
+        self.assertAlmostEquals(ws.getNumberHistograms(), scatter_ws.getNumberHistograms(),5)
+
+        comparison = lambda data, expected : all([self.assertAlmostEqual(data[i], expected, 5, "Should be equal")
+                                                  for i in range(0, len(data))])
+
+        # Expected value for monitors
+        expected_monitor_Y = monY_scatter - monY_dark*len(dark_run.dataY(0))/len(scatter_ws.dataY(0))*normalization_ratio
+        comparison(ws.dataY(1), expected_monitor_Y)
+        comparison(ws.dataY(0), expected_monitor_Y)
+
+        # Expected value for detectors
+        expected_detector_Y = dataY_scatter - dataY_dark*len(dark_run.dataY(0))/len(scatter_ws.dataY(0))*normalization_ratio
+        for index in range(2, ws.getNumberHistograms()):
+            comparison(ws.dataY(index), expected_detector_Y)
+
+    def test_that_selecting_invidual_monitors_and_detectors_is_allowed(self):
+        # Arrange
+        monY_scatter = 1.
+        monE_scatter = 1.
+        dataY_scatter = 2.
+        dataE_scatter = 2.
+        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
+                                                        dataY_scatter, dataE_scatter,
+                                                        as_dark_run = False)
+        monY_dark = 3.
+        monE_dark = 3.
+        dataY_dark = 4.
+        dataE_dark = 4.
+        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
+                                                      dataY_dark, dataE_dark,
+                                                      as_dark_run = True)
+        mean = False
+        uniform = True
+        normalization_ratio = 0.6
+        applyToMonitors = True
+        applyToDetectors = True
+        out_ws_name = "out_test"
+        selected_monitor = [2]
+        # Act
+        ws = self._do_run_dark_subtraction(scatter_ws, dark_run, mean, uniform, normalization_ratio,
+                                      out_ws_name, applyToMonitors, applyToDetectors, selected_monitor)
+
+        # Assert
+        self.assertAlmostEquals(ws.getNumberHistograms(), scatter_ws.getNumberHistograms(), 5)
+
+        comparison = lambda data, expected : all([self.assertAlmostEqual(data[i], expected, 5, "Should be equal")
+                                                  for i in range(0, len(data))])
+
+        # Expected value for monitor 2 -- workspace index 1
+        expected_monitor_Y_1 = monY_scatter - monY_dark*len(dark_run.dataY(0))/len(scatter_ws.dataY(0))*normalization_ratio
+        comparison(ws.dataY(1), expected_monitor_Y_1)
+
+        # Expected value for monitor 1 -- workspace index 0
+        expected_monitor_Y_0 = monY_scatter
+        comparison(ws.dataY(0), expected_monitor_Y_0)
+
+        # Expected value for detectors
+        expected_detector_Y = dataY_scatter - dataY_dark*len(dark_run.dataY(0))/len(scatter_ws.dataY(0))*normalization_ratio
+        for index in range(2, ws.getNumberHistograms()):
+            comparison(ws.dataY(index), expected_detector_Y)
+
+    def test_that_throws_if_monitor_selection_is_invalid(self):
+        # Arrange
+        monY_scatter = 1.
+        monE_scatter = 1.
+        dataY_scatter = 2.
+        dataE_scatter = 2.
+        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
+                                                        dataY_scatter, dataE_scatter,
+                                                        as_dark_run = False)
+
+        monY_dark = 3.
+        monE_dark = 3.
+        dataY_dark = 4.
+        dataE_dark = 4.
+        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
+                                                      dataY_dark, dataE_dark,
+                                                      as_dark_run = True)
+
+        mean = False
+        uniform = True
+        normalization_ratio = 0.6
+        applyToMonitors = True
+        applyToDetectors = False
+        out_ws_name = "out_test"
+        selected_monitor = [3] # the workspace only has monitors with det IDs 1 and 2
+        # Act + Assert
+        kwds = {"InputWorkspace": scatter_ws,
+                "DarkRun": dark_run,
+                "NormalizationRatio": normalization_ratio,
+                "Mean": mean,
+                "Uniform": uniform,
+                "ApplyToDetectors": applyToDetectors,
+                "ApplyToMonitors": applyToMonitors,
+                "SelectedMonitors": selected_monitor,
+                "OutputWorkspace": "out_ws"}
+
+        scatter_name = "scatter_workspace_test"
+        dark_name = "dark_workspace_test"
+
+        AnalysisDataService.add(scatter_name, scatter_ws)
+        AnalysisDataService.add(dark_name, dark_run)
+        self.assertRaises(RuntimeError, SANSDarkRunBackgroundCorrection, **kwds)
+
+        # Clean up
+        ws_to_clean = [scatter_name, dark_name]
+        self._clean_up(ws_to_clean)
+
+    def test_that_throws_if_neither_monitor_nor_detectors_are_selected(self):
+        # Arrange
+        monY_scatter = 1.
+        monE_scatter = 1.
+        dataY_scatter = 2.
+        dataE_scatter = 2.
+        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
+                                                        dataY_scatter, dataE_scatter,
+                                                        as_dark_run = False)
+        monY_dark = 3.
+        monE_dark = 3.
+        dataY_dark = 4.
+        dataE_dark = 4.
+        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
+                                                      dataY_dark, dataE_dark,
+                                                      as_dark_run = True)
+        mean = False
+        uniform = True
+        normalization_ratio = 0.6
+        applyToMonitors = False
+        applyToDetectors = False
+        out_ws_name = "out_test"
+        selected_monitor = []
+        # Act + Assert
+        kwds = {"InputWorkspace": scatter_ws,
+                "DarkRun": dark_run,
+                "NormalizationRatio": normalization_ratio,
+                "Mean": mean,
+                "Uniform": uniform,
+                "ApplyToDetectors": applyToDetectors,
+                "ApplyToMonitors": applyToMonitors,
+                "SelectedMonitors": selected_monitor,
+                "OutputWorkspace": "out_ws"}
+
+        scatter_name = "scatter_workspace_test"
+        dark_name = "dark_workspace_test"
+
+        AnalysisDataService.add(scatter_name, scatter_ws)
+        AnalysisDataService.add(dark_name, dark_run)
+        self.assertRaises(RuntimeError, SANSDarkRunBackgroundCorrection, **kwds)
+
+        # Clean up
+        ws_to_clean = [scatter_name, dark_name]
+        self._clean_up(ws_to_clean)
+
+    #------
+    # Helper methods
+    def _create_test_workspace(self, name, x, y, error, number_of_spectra):
+        alg = run_algorithm('CreateWorkspace',
+                            DataX = x,
+                            DataY = y,
+                            DataE = error,
+                            NSpec = number_of_spectra,
+                            OutputWorkspace= name)
+        return alg.getPropertyValue("OutputWorkspace")
+
+    def _check_output_workspace(self, original_ws, corrected_ws, expected_correction_value):
+        # Iterate over all spectra
+        num_spectra = original_ws.getNumberHistograms()
+
+        for index in range(0, num_spectra):
+            y_original = original_ws.dataY(index)
+            y_corrected = corrected_ws.dataY(index)
+            for elem in range(0, len(y_original)):
+                expected = y_original[elem] - expected_correction_value
+                self.assertAlmostEqual(expected,
+                                       y_corrected[elem], 4)
+
+    def _do_run_dark_subtraction(self, scatter, dark_run, mean, uniform, normalization_ratio,
+                                 out_ws_name, applyToMonitors, applyToDetectors, selected_monitor):
+        alg_dark  = AlgorithmManager.createUnmanaged("SANSDarkRunBackgroundCorrection")
+        alg_dark.initialize()
+        alg_dark.setChild(True)
+        alg_dark.setProperty("InputWorkspace", scatter)
+        alg_dark.setProperty("DarkRun", dark_run)
+        alg_dark.setProperty("Mean", mean)
+        alg_dark.setProperty("Uniform", uniform)
+        alg_dark.setProperty("NormalizationRatio", normalization_ratio)
+        alg_dark.setProperty("OutputWorkspace", out_ws_name)
+        alg_dark.setProperty("ApplyToMonitors", applyToMonitors)
+        alg_dark.setProperty("ApplyToDetectors", applyToDetectors)
+        alg_dark.setProperty("SelectedMonitors", selected_monitor)
+        alg_dark.execute()
+
+        return alg_dark.getProperty("OutputWorkspace").value
+
+    def _check_output_workspace_non_uniform(self, original_ws, corrected_ws,
+                                            dark_ws, expected_correction_value):
+        # Iterate over all spectra
+        num_spectra = original_ws.getNumberHistograms()
+
+        for index in range(0, num_spectra):
+            y_original = original_ws.dataY(index)
+            y_dark = dark_ws.dataY(index)
+            y_corrected = corrected_ws.dataY(index)
+            for elem in range(0, len(y_original)):
+                expected = y_original[elem] - y_dark[elem]*expected_correction_value
+                self.assertAlmostEqual(expected, y_corrected[elem], 4)
+
+    def _provide_workspace2D(self, bin_boundaries, y_value, e_value, name, spectra, use_y_list = False):
+        x = spectra*[element for element in range(bin_boundaries)]
+        y = None
+        if use_y_list:
+            y = y_value
+        else:
+            y = spectra*[y_value for element in range(bin_boundaries - 1)]
+        e = spectra*[e_value for element in range(bin_boundaries - 1)]
+        self._create_test_workspace(name, x, y, e, spectra)
+
+    def _clean_up(self, ws_to_clean):
+        for ws in ws_to_clean:
+            if AnalysisDataService.doesExist(ws):
+                AnalysisDataService.remove(ws)
+
+    def _load_workspace_with_monitors(self, monY, monE, dataY, dataE, as_dark_run = False):
+        filename = "LOQ48127np.nxs"
+        out_ws_name = "sans_workspace_test"
+        if as_dark_run:
+            out_ws_name = "dark_run_workspace_test"
+        
+        alg_load  = AlgorithmManager.createUnmanaged("LoadNexusProcessed")
+        alg_load.initialize()
+        alg_load.setChild(True)
+        alg_load.setProperty("Filename", filename)
+        alg_load.setProperty("OutputWorkspace", out_ws_name)
+        alg_load.execute()
+        ws = alg_load.getProperty("OutputWorkspace").value
+
+        # Set the monitor Y and E values (identical for the scatter and the dark run case)
+        ws.setY(0, ws.dataY(0)*0.0 + monY)
+        ws.setE(0, ws.dataE(0)*0.0 + monE)
+        ws.setY(1, ws.dataY(1)*0.0 + monY)
+        ws.setE(1, ws.dataE(1)*0.0 + monE)
+
+        # Set the detector Y and E values
+        for element in range(2, ws.getNumberHistograms()):
+            ws.setY(element, ws.dataY(element)*0.0 + dataY)
+            ws.setE(element, ws.dataE(element)*0.0 + dataE)
+
+        return ws
+
+class DarkRunMonitorAndDetectorRemoverTest(unittest.TestCase):
+
+    def test_finds_all_monitor_indices_when_monitor_is_present(self):
+        # Arrange
+        test_ws = self._load_workspace_with_monitors()
+        ws = mtd[test_ws]
+        remover = DarkRunMonitorAndDetectorRemover()
+
+        # Act
+
+        indices = remover.find_monitor_workspace_indices(ws)
+
+        # Assert
+        ws_index, det_ids = zip(*indices)
+        self.assertEqual(len(indices), 2, "There should be two monitors")
+        self.assertEqual(ws_index[0], 0, "The first monitor should have a workspace index of 0")
+        self.assertEqual(ws_index[1], 1, "The second monitor should have a workspace index of 1")
+        self.assertEqual(det_ids[0], 1, "The first monitor should have a detector ID of 1")
+        self.assertEqual(det_ids[1], 2, "The second monitor should have a detector ID of 2")
+        # Clean up
+        ws_to_clean =[test_ws]
+        self._clean_up(ws_to_clean)
+
+    def test_find_no_monitors_when_no_monitors_are_present(self):
+        # Arrange
+        test_ws = self._load_workspace_without_monitors()
+        ws = mtd[test_ws]
+        remover = DarkRunMonitorAndDetectorRemover()
+
+        # Act
+        indices = remover.find_monitor_workspace_indices(ws)
+
+        # Assert
+        self.assertEqual(len(indices), 0, "There should be no monitors")
+
+        # Clean up
+        ws_to_clean =[test_ws]
+        self._clean_up(ws_to_clean)
+
+    def test_keep_all_monitors_discard_detectors(self):
+        # Arrange
+        test_ws = self._load_workspace_with_monitors()
+        ws = mtd[test_ws]
+        remover = DarkRunMonitorAndDetectorRemover()
+
+        dataY0_reference = np.copy(ws.dataY(0))
+        dataE0_reference = np.copy(ws.dataE(0))
+        dataY1_reference = np.copy(ws.dataY(1))
+        dataE1_reference = np.copy(ws.dataE(1))
+        number_histograms_reference = ws.getNumberHistograms()
+        zero_reference = dataY0_reference*0
+
+        # Act
+        monitor_selection = []
+        dark_run_corrected = remover.set_pure_monitor_dark_run(ws, monitor_selection)
+
+        # Assert
+        self.assertEqual(dark_run_corrected.getNumberHistograms(), number_histograms_reference,
+                         "The number of histograms should not have changed")
+
+        self._assert_items_are_equal(dark_run_corrected.dataY(0), dataY0_reference,
+                                     "First monitor Y data should not have changed")
+        self._assert_items_are_equal(dark_run_corrected.dataE(0), dataE0_reference,
+                                     "First monitor E data should not have changed")
+
+
+        self._assert_items_are_equal(dark_run_corrected.dataY(1), dataY1_reference,
+                                     "Second monitor Y data should not have changed")
+        self._assert_items_are_equal(dark_run_corrected.dataE(1), dataE1_reference,
+                                     "Second monitor E data should not have changed")
+
+        for element in range(2, dark_run_corrected.getNumberHistograms()):
+            self._assert_items_are_equal(dark_run_corrected.dataY(element), zero_reference,
+                                     "The Y data of non-monitor detectors should be 0")
+            self._assert_items_are_equal(dark_run_corrected.dataE(element), zero_reference,
+                                     "The E data of non-monitor detectors should be 0")
+
+        # Clean up
+        ws_to_clean = [test_ws]
+        self._clean_up(ws_to_clean)
+
+    def test_keep_all_detectors_discard_monitors(self):
+        # Arrange
+        test_ws = self._load_workspace_with_monitors()
+        ws = mtd[test_ws]
+        remover = DarkRunMonitorAndDetectorRemover()
+
+        ref_ws = ws.clone()
+        zero_reference = ref_ws.dataY(0)*0
+
+        # Act
+        monitor_selection = []
+        dark_run_corrected = remover.set_pure_detector_dark_run(ws)
+
+        # Assert
+        self.assertEqual(dark_run_corrected.getNumberHistograms(), ref_ws.getNumberHistograms(),
+                         "The number of histograms should not have changed")
+
+        self._assert_items_are_equal(dark_run_corrected.dataY(0), zero_reference,
+                                     "First monitor Y data should be 0")
+        self._assert_items_are_equal(dark_run_corrected.dataE(0), zero_reference,
+                                     "First monitor E data should be 0")
+
+        self._assert_items_are_equal(dark_run_corrected.dataY(1), zero_reference,
+                                     "Second monitor Y data should be 0")
+        self._assert_items_are_equal(dark_run_corrected.dataE(1),  zero_reference,
+                                     "Second monitor E data should be 0")
+
+        for element in range(2, dark_run_corrected.getNumberHistograms()):
+            self._assert_items_are_equal(dark_run_corrected.dataY(element), ref_ws.dataY(element),
+                                     "The Y data of non-monitor detectors should not have changed")
+            self._assert_items_are_equal(dark_run_corrected.dataE(element), ref_ws.dataE(element),
+                                     "The E data of non-monitor detectors should not have changed")
+
+        # Clean up
+        ws_to_clean = [test_ws, "ref_ws"]
+        self._clean_up(ws_to_clean)
+
+    def test_that_individual_monitors_can_be_selected(self):
+        # Arrange
+        test_ws = self._load_workspace_with_monitors()
+        ws = mtd[test_ws]
+        remover = DarkRunMonitorAndDetectorRemover()
+
+        zero_reference = np.copy(ws.dataY(0))*0
+        dataY0_reference = np.copy(ws.dataY(0))
+        dataE0_reference = np.copy(ws.dataE(0))
+        number_histograms_reference = ws.getNumberHistograms()
+
+        monitor_selection = [1] # We select the monitor with detector ID 1
+                                # which is workspace index 0 for this workspace
+
+        # Act
+        dark_run_corrected = remover.set_pure_monitor_dark_run(ws, monitor_selection)
+
+
+        # Assert
+        self.assertEqual(dark_run_corrected.getNumberHistograms(), number_histograms_reference,
+                         "The number of histograms should not have changed")
+
+        self._assert_items_are_equal(dark_run_corrected.dataY(0), dataY0_reference,
+                                     "First monitor Y data should not have changed")
+
+        self._assert_items_are_equal(dark_run_corrected.dataE(0), dataE0_reference,
+                                     "First monitor E data should not have changed")
+
+        self._assert_items_are_equal(dark_run_corrected.dataY(1), zero_reference,
+                                     "Second monitor Y data should be 0")
+        self._assert_items_are_equal(dark_run_corrected.dataE(1), zero_reference,
+                                     "Second monitor E data should be 0")
+
+        for element in range(2, dark_run_corrected.getNumberHistograms()):
+            self._assert_items_are_equal(dark_run_corrected.dataY(element), zero_reference,
+                                     "The Y data of non-monitor detectors should be 0")
+            self._assert_items_are_equal(dark_run_corrected.dataE(element), zero_reference,
+                                     "The E data of non-monitor detectors should be 0")
+
+        # Clean up
+        ws_to_clean = [test_ws]
+        self._clean_up(ws_to_clean)
+
+    def test_that_throws_if_selection_does_not_match_available_monitor_list(self):
+        # Arrange
+        test_ws = self._load_workspace_with_monitors()
+        ws = mtd[test_ws]
+        remover = DarkRunMonitorAndDetectorRemover()
+
+        zero_reference = np.copy(ws.dataY(0))*0
+        dataY1_reference = np.copy(ws.dataY(1))
+        dataE1_reference = np.copy(ws.dataE(1))
+        number_histograms_reference = ws.getNumberHistograms()
+
+        monitor_selection = [0,2]
+
+        # Act+ Assert
+        args = [ws, monitor_selection]
+        self.assertRaises(RuntimeError, remover.set_pure_monitor_dark_run, *args)
+
+        # Clean up
+        ws_to_clean = [test_ws]
+        self._clean_up(ws_to_clean)
+
+    def _load_workspace_with_monitors(self):
+        filename = "LOQ48127np.nxs"
+        out_ws_name = "dark_run_monitor_test_ws"
+        alg = run_algorithm(
+                    'LoadNexusProcessed',
+                    Filename= filename,
+                    OutputWorkspace = out_ws_name,
+                    rethrow = True)
+        return alg.getPropertyValue("OutputWorkspace")
+
+    def _load_workspace_without_monitors(self):
+        out_ws_name = "dark_run_monitor_test_ws"
+        alg = run_algorithm(
+                    'CreateSampleWorkspace',
+                    OutputWorkspace = out_ws_name,
+                    rethrow = True)
+        return alg.getPropertyValue("OutputWorkspace")
+
+    def _clean_up(self, ws_to_clean):
+        for ws in ws_to_clean:
+            if AnalysisDataService.doesExist(ws):
+                AnalysisDataService.remove(ws)
+
+    def _assert_items_are_equal(self, list1, list2, message):
+        # This method is needed since RHEL6 cannot handle assertItemsEqual
+        for index in range(0, len(list1)):
+            self.assertEqual(list1[index], list2[index], message)
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/docs/source/algorithms/SANSDarkRunBackgroundCorrection-v1.rst b/docs/source/algorithms/SANSDarkRunBackgroundCorrection-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..23f3b97e82fb39accf2f51907e60ce3bb8b0b2dd
--- /dev/null
+++ b/docs/source/algorithms/SANSDarkRunBackgroundCorrection-v1.rst
@@ -0,0 +1,64 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm subtracts a dark run from a workspace. *InputWorkspace* and *DarkRun* have to
+be of type Workspace2D and need to contain the same spectra.
+The user can choose to subtract spectra which are associated with detectors
+(*ApplyToDetectors*) and/or monitors (*ApplyToMonitors*). In the case of monitors, the user can
+select specific monitors (*SelectedMonitors*) according to their detector IDs.
+
+The *NormalizationRatio* is used to scale the signal values of the *DarkRun* workspace before
+subtraction.
+
+The background subtraction can be performed in several ways.
+
+* *Uniform* disabled: *DarkRun* is rebinned to the binning of the *InputWorkspace* and subtracted bin by bin.
+* *Uniform* enabled: The counts of each *DarkRun* spectrum are summed up and spread evenly over the bins of the corresponding *InputWorkspace* spectrum before subtraction. Note that *Mean* only takes effect when *Uniform* is enabled.
+* *Mean* enabled: The summed counts are additionally averaged over all spectra, and this single value is used for every spectrum.
+* *Mean* disabled: The subtraction happens for each spectrum separately.
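+
+For example, with *Uniform* enabled and *Mean* disabled, a *DarkRun* spectrum which integrates to
+40 counts, a *NormalizationRatio* of 0.5 and an *InputWorkspace* with 100 bins lead to
+40 * 0.5 / 100 = 0.2 counts being subtracted from every bin of the corresponding spectrum.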
+
+Usage
+-----
+
+**Example - SANSDarkRunBackgroundCorrection for a bin-by-bin correction of the detectors**
+
+.. testcode:: SANSDarkRunBackgroundCorrection
+
+    # Create sample workspaces. Note that the dark run is here the same as the sample run
+    ws_sample = CreateSampleWorkspace()
+    ws_dark_run = CloneWorkspace(ws_sample)
+
+    out_ws = SANSDarkRunBackgroundCorrection(InputWorkspace = ws_sample, 
+                                             DarkRun = ws_dark_run,
+                                             NormalizationRatio = 0.5,
+                                             Uniform = False,
+                                             Mean = False,
+                                             ApplyToDetectors = True,
+                                             ApplyToMonitors = False)
+
+    # We should have effectively halved the data values
+    in_y = ws_sample.dataY(0)
+    out_y = out_ws.dataY(0)
+
+    print "The first bin of the first spectrum of the input was " + str(in_y[0])
+    print "After the dark run correction it is " + str(out_y[0])
+
+Output:
+
+.. testoutput:: SANSDarkRunBackgroundCorrection
+
+    The first bin of the first spectrum of the input was 0.3
+    After the dark run correction it is 0.15
+
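+**Example - SANSDarkRunBackgroundCorrection applied only to a selected monitor**
+
+The correction can also be restricted to monitor spectra. The snippet below is a minimal sketch
+(not a doctest): it assumes that ``ws_sample`` and ``ws_dark`` are Workspace2D objects which
+contain monitor spectra with detector IDs 1 and 2. Only the monitor with detector ID 2 is
+corrected; the detector spectra are left unchanged.
+
+.. code-block:: python
+
+    # ws_sample and ws_dark are assumed to exist and to contain monitor spectra
+    # with detector IDs 1 and 2 (see the note above).
+    corrected = SANSDarkRunBackgroundCorrection(InputWorkspace = ws_sample,
+                                                DarkRun = ws_dark,
+                                                NormalizationRatio = 0.5,
+                                                Uniform = True,
+                                                Mean = False,
+                                                ApplyToDetectors = False,
+                                                ApplyToMonitors = True,
+                                                SelectedMonitors = [2])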
+
+.. categories::
+
+.. sourcelink::