diff --git a/Framework/PythonInterface/plugins/algorithms/ReflectometrySliceEventWorkspace.py b/Framework/PythonInterface/plugins/algorithms/ReflectometrySliceEventWorkspace.py
index 1a69fbc5c77e60da1fca52eaad9c562cce4ee7f8..6c84838a89ecc788d0c2c591a38960f9a3457f6f 100644
--- a/Framework/PythonInterface/plugins/algorithms/ReflectometrySliceEventWorkspace.py
+++ b/Framework/PythonInterface/plugins/algorithms/ReflectometrySliceEventWorkspace.py
@@ -27,7 +27,8 @@ class ReflectometrySliceEventWorkspace(DataProcessorAlgorithm):
         # Add properties from child algorithm
         self._filter_properties = [
             'InputWorkspace', 'StartTime', 'StopTime','TimeInterval',
-            'LogName','MinimumLogValue','MaximumLogValue', 'LogValueInterval']
+            'LogName','MinimumLogValue','MaximumLogValue', 'LogValueInterval','LogBoundary',
+            'LogValueTolerance']
         self.copyProperties('GenerateEventsFilter', self._filter_properties)
 
         # Add our own properties
@@ -36,12 +37,12 @@ class ReflectometrySliceEventWorkspace(DataProcessorAlgorithm):
         self.declareProperty(WorkspaceGroupProperty('OutputWorkspace', '',
                                                     direction=Direction.Output),
                              doc='Group name for the output workspace(s).')
+        self.declareProperty("UseNewFilterAlgorithm", True, doc='If true, use the new FilterEvents algorithm instead of FilterByTime.')
 
     def PyExec(self):
         self._input_ws = self.getProperty("InputWorkspace").value
         self._output_ws_group_name = self.getPropertyValue("OutputWorkspace")
 
-        self._create_filter()
         output_ws_group = self._slice_input_workspace()
         self._scale_monitors_for_each_slice(output_ws_group)
         output_ws_group = self._rebin_to_monitors()
@@ -50,19 +51,25 @@ class ReflectometrySliceEventWorkspace(DataProcessorAlgorithm):
         self.setProperty("OutputWorkspace", self._output_ws_group_name)
         self._clean_up()
 
-    def _create_filter(self):
-        """Generate the splitter workspace for performing the filtering for each required slice"""
-        alg = self.createChildAlgorithm("GenerateEventsFilter")
-        for property in self._filter_properties:
-            alg.setProperty(property, self.getPropertyValue(property))
-        alg.setProperty("OutputWorkspace", '__split')
-        alg.setProperty("InformationWorkspace", '__info')
-        alg.execute()
-        self._split_ws = alg.getProperty("OutputWorkspace").value
-        self._info_ws = alg.getProperty("InformationWorkspace").value
-
     def _slice_input_workspace(self):
+        if self.getProperty("UseNewFilterAlgorithm").value:
+            return self._slice_input_workspace_with_filter_events()
+        elif self._slice_by_log():
+            return self._slice_input_workspace_with_filter_by_log_value()
+        else:
+            return self._slice_input_workspace_with_filter_by_time()
+
+    def _slice_by_log(self):
+        """Return true if we are slicing by log value"""
+        return self._property_set("LogName")
+
+    def _property_set(self, property_name):
+        """Return true if the given property is set"""
+        return not self.getProperty(property_name).isDefault
+
+    def _slice_input_workspace_with_filter_events(self):
         """Perform the slicing of the input workspace"""
+        self._create_filter()
         alg = self.createChildAlgorithm("FilterEvents")
         alg.setProperty("InputWorkspace", self._input_ws)
         alg.setProperty("SplitterWorkspace", self._split_ws)
@@ -83,10 +90,93 @@ class ReflectometrySliceEventWorkspace(DataProcessorAlgorithm):
         # sample logs as a string (FilterEvents converts it to a double).
         group = mtd[self._output_ws_group_name]
         for ws in group:
-            if ws.run().hasProperty('run_number'):
-                run_number = int(ws.run()['run_number'].value)
-                AddSampleLog(Workspace=ws, LogName='run_number', LogType='String',
-                             LogText=str(run_number))
+            self._copy_run_number_to_sample_log(ws, ws)
+        return group
+
+    def _create_filter(self):
+        """Generate the splitter workspace for performing the filtering for each required slice"""
+        alg = self.createChildAlgorithm("GenerateEventsFilter")
+        for property_name in self._filter_properties:
+            alg.setProperty(property_name, self.getPropertyValue(property_name))
+        alg.setProperty("OutputWorkspace", '__split')
+        alg.setProperty("InformationWorkspace", '__info')
+        alg.execute()
+        self._split_ws = alg.getProperty("OutputWorkspace").value
+        self._info_ws = alg.getProperty("InformationWorkspace").value
+
+    def _slice_input_workspace_with_filter_by_time(self):
+        # Get the start/stop times, or use the run start/stop times if they are not provided
+        run_start = DateAndTime(self._input_ws.run().startTime())
+        run_stop = DateAndTime(self._input_ws.run().endTime())
+        start_time = self._get_property_or_default_as_datetime("StartTime", default_value=run_start,
+                                                               relative_start=run_start)
+        stop_time = self._get_property_or_default_as_datetime("StopTime", default_value=run_stop,
+                                                              relative_start=run_start)
+        # Get the time interval, or use the total interval if it's not provided
+        total_interval = (stop_time - start_time).total_seconds()
+        time_interval = self._get_interval_as_float("TimeInterval", total_interval)
+        # Calculate start/stop times in seconds relative to the start of the run
+        relative_start_time = (start_time - run_start).total_seconds()
+        relative_stop_time = relative_start_time + total_interval
+        # Loop through each slice
+        slice_names = list()
+        slice_start_time = relative_start_time
+        while slice_start_time < relative_stop_time:
+            slice_stop_time = slice_start_time + time_interval
+            slice_name = self._output_ws_group_name + '_' + str(slice_start_time) + '_' + str(slice_stop_time)
+            slice_names.append(slice_name)
+            alg = self.createChildAlgorithm("FilterByTime")
+            alg.setProperty("InputWorkspace", self._input_ws)
+            alg.setProperty("OutputWorkspace", slice_name)
+            alg.setProperty("StartTime", str(slice_start_time))
+            alg.setProperty("StopTime", str(slice_stop_time))
+            alg.execute()
+            sliced_workspace = alg.getProperty("OutputWorkspace").value
+            mtd.addOrReplace(slice_name, sliced_workspace)
+            # Proceed to the next interval
+            slice_start_time = slice_stop_time
+        # Group the sliced workspaces
+        group = self._group_workspaces(slice_names, self._output_ws_group_name)
+        mtd.addOrReplace(self._output_ws_group_name, group)
+        # Ensure the run number for the child workspaces is stored in the
+        # sample logs as a string (slicing can convert it to a double).
+        for ws in group:
+            self._copy_run_number_to_sample_log(ws, ws)
+        return group
+
+    def _slice_input_workspace_with_filter_by_log_value(self):
+        # Get the min/max log value, or use the values from the sample logs if they're not provided
+        log_name = self.getProperty("LogName").value
+        run_log_start = min(self._input_ws.run().getProperty(log_name).value)
+        run_log_stop = max(self._input_ws.run().getProperty(log_name).value)
+        log_min = self._get_property_or_default("MinimumLogValue", run_log_start)
+        log_max = self._get_property_or_default("MaximumLogValue", run_log_stop)
+        log_interval = self._get_interval_as_float("LogValueInterval", log_max - log_min)
+        slice_names = list()
+        slice_start_value = log_min
+        while slice_start_value < log_max:
+            slice_stop_value = slice_start_value + log_interval
+            slice_name = self._output_ws_group_name + '_' + str(slice_start_value) + '_' + str(slice_stop_value)
+            slice_names.append(slice_name)
+            alg = self.createChildAlgorithm("FilterByLogValue")
+            alg.setProperty("InputWorkspace", self._input_ws)
+            alg.setProperty("OutputWorkspace", slice_name)
+            alg.setProperty("LogName", log_name)
+            alg.setProperty("LogBoundary", self.getProperty("LogBoundary").value)
+            alg.setProperty("MinimumValue", slice_start_value)
+            alg.setProperty("MaximumValue", slice_stop_value)
+            alg.execute()
+            sliced_workspace = alg.getProperty("OutputWorkspace").value
+            mtd.addOrReplace(slice_name, sliced_workspace)
+            # Proceed to the next interval
+            slice_start_value = slice_stop_value
+        # Group the sliced workspaces
+        group = self._group_workspaces(slice_names, self._output_ws_group_name)
+        mtd.addOrReplace(self._output_ws_group_name, group)
+        # Ensure the run number for the child workspaces is stored in the
+        # sample logs as a string (slicing can convert it to a double).
+        for ws in group:
+            self._copy_run_number_to_sample_log(ws, ws)
         return group
 
     def _scale_monitors_for_each_slice(self, sliced_ws_group):
@@ -102,9 +192,10 @@ class ReflectometrySliceEventWorkspace(DataProcessorAlgorithm):
             scale_factor = slice.run().getProtonCharge() / total_proton_charge
             slice_monitor_ws = self._scale_workspace(slice_monitor_ws, slice_monitor_ws_name,
                                                      scale_factor)
-            # The workspace must be in the ADS for grouping
+            # The workspace must be in the ADS for grouping and updating the sample log
             mtd.addOrReplace(slice_monitor_ws_name, slice_monitor_ws)
             monitors_ws_list.append(slice_monitor_ws_name)
+            self._copy_run_number_to_sample_log(slice, slice_monitor_ws)
             i+=1
 
         self._monitor_ws_group_name = input_monitor_ws.name() + '_sliced'
@@ -152,7 +243,7 @@ class ReflectometrySliceEventWorkspace(DataProcessorAlgorithm):
         alg = self.createChildAlgorithm("AppendSpectra")
         alg.setProperty("InputWorkspace1", self._monitor_ws_group_name)
         alg.setProperty("InputWorkspace2", self._output_ws_group_name)
-        alg.setProperty("MergeLogs", True)
+        alg.setProperty("MergeLogs", False)
         alg.setProperty("OutputWorkspace", self._output_ws_group_name)
         alg.execute()
         return alg.getProperty("OutputWorkspace").value
@@ -166,4 +257,47 @@ class ReflectometrySliceEventWorkspace(DataProcessorAlgorithm):
         for ws_name in monitor_ws_names:
             mtd.remove(ws_name)
 
+    def _get_property_or_default(self, property_name, default_value):
+        """Get a property value. Return the given default value if the property is not set."""
+        if self.getProperty(property_name).isDefault:
+            return default_value
+        else:
+            return self.getProperty(property_name).value
+
+    def _get_property_or_default_as_datetime(self, property_name, default_value, relative_start):
+        """Get a property value as a DateAndTime. Return the given default value if the property is not set.
+        If the property is in datetime format, return it directly. Otherwise if it is in seconds, then convert
+        it to a datetime by adding it to the given relative_start time."""
+        if self.getProperty(property_name).isDefault:
+            return default_value
+        else:
+            value = self.getProperty(property_name).value
+            try:
+                result = DateAndTime(value)
+            except:
+                value_ns = int(value) * 1000000000
+                result = relative_start + value_ns
+            return result
+
+    def _copy_run_number_to_sample_log(self, ws_with_run_number, ws_to_update):
+        if ws_with_run_number.run().hasProperty('run_number'):
+            run_number = int(ws_with_run_number.run()['run_number'].value)
+            AddSampleLog(Workspace=ws_to_update, LogName='run_number', LogType='String',
+                         LogText=str(run_number))
+
+    def _get_interval_as_float(self, property_name, default_value):
+        """Get an interval property value (could be time interval or log value interval)
+        as a float. Checks if the user has entered a list of floats and for now throws
+        if this is the case (this is only used in backwards compatibility mode and multiple
+        intervals are not currently supported in that mode)"""
+        if self.getProperty(property_name).isDefault:
+            return float(default_value)
+        value_as_string = self.getPropertyValue(property_name)
+        value_as_list = value_as_string.split(',')
+        if len(value_as_list) > 1:
+            raise RuntimeError("Multiple intervals are not currently supported if UseNewFilterAlgorithm is False")
+        if len(value_as_list) < 1:
+            raise RuntimeError("Interval was not specified")
+        return float(value_as_list[0])
+
 AlgorithmFactory.subscribe(ReflectometrySliceEventWorkspace())
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ReflectometryISISLoadAndProcess.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ReflectometryISISLoadAndProcess.py
index 589418260a97d4aed8921d06160a2a63c2994f03..31afb2d9033818ce4f4987277e05655448fa11f3 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ReflectometryISISLoadAndProcess.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/ReflectometryISISLoadAndProcess.py
@@ -136,7 +136,7 @@ class ReflectometryISISLoadAndProcess(DataProcessorAlgorithm):
         self.declareProperty(Prop.SLICE, False, doc='If true, slice the input workspace')
         whenSliceEnabled = EnabledWhenProperty(Prop.SLICE, PropertyCriterion.IsEqualTo, "1")
 
-        self._slice_properties = ['TimeInterval', 'LogName', 'LogValueInterval']
+        self._slice_properties = ['TimeInterval', 'LogName', 'LogValueInterval', 'UseNewFilterAlgorithm']
         self.copyProperties('ReflectometrySliceEventWorkspace', self._slice_properties)
         for property in self._slice_properties:
             self.setPropertySettings(property, whenSliceEnabled)
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/ReflectometrySliceEventWorkspaceTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/ReflectometrySliceEventWorkspaceTest.py
index 3041d705cd1dce0c275fc8c258ec6e5372296ef0..75905175023d82c99413987d653ce6dc3498d9d6 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/ReflectometrySliceEventWorkspaceTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/ReflectometrySliceEventWorkspaceTest.py
@@ -14,7 +14,7 @@ from mantid.simpleapi import *
 from testhelpers import (assertRaisesNothing, create_algorithm)
 
 
-class ReflectometrySliceEventWorkspace(unittest.TestCase):
+class ReflectometrySliceEventWorkspaceTest(unittest.TestCase):
     def setUp(self):
         self.__class__._input_ws = self._create_test_workspace()
         self.__class__._input_ws_group = self._create_test_workspace_group()
@@ -23,7 +23,8 @@ class ReflectometrySliceEventWorkspace(unittest.TestCase):
         self._default_args = {
             'InputWorkspace' : 'input_ws',
             'MonitorWorkspace' : 'monitor_ws',
-            'OutputWorkspace': 'output'
+            'OutputWorkspace': 'output',
+            'UseNewFilterAlgorithm': True
         }
 
     def tearDown(self):
@@ -41,24 +42,44 @@ class ReflectometrySliceEventWorkspace(unittest.TestCase):
 
     def test_default_inputs_return_single_slice(self):
         output = self._assert_run_algorithm_succeeds(self._default_args)
-        self.assertEqual(output.getNumberOfEntries(), 1)
-        first_slice = output[0]
-        self.assertEqual(first_slice.getNumberHistograms(), 5)
-        self.assertEqual(first_slice.dataX(0).size, 101)
-        self._assert_delta(first_slice.dataY(3)[0], 14)
-        self._assert_delta(first_slice.dataY(3)[51], 16)
-        self._assert_delta(first_slice.dataY(3)[99], 8)
+        self._check_slices(output, ["output_0_4200"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[14, 16, 8])
+
+    def test_default_inputs_return_single_slice_FilterByTime(self):
+        args = self._default_args
+        args['UseNewFilterAlgorithm'] = False
+        output = self._assert_run_algorithm_succeeds(args)
+        self._check_slices(output, ["output_0_3600.0"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[14, 16, 8])
 
     def test_setting_time_interval(self):
         args = self._default_args
         args['TimeInterval'] = 600
         output = self._assert_run_algorithm_succeeds(args)
-        self.assertEqual(output.getNumberOfEntries(), 7)
-        first_slice = output[0]
-        self.assertEqual(first_slice.dataX(0).size, 101)
-        self._assert_delta(first_slice.dataY(3)[0], 2)
-        self._assert_delta(first_slice.dataY(3)[51], 6)
-        self._assert_delta(first_slice.dataY(3)[99], 1)
+        self._check_slices(output, ["output_0_600", "output_600_1200", "output_1200_1800",
+                                    "output_1800_2400", "output_2400_3000", "output_3000_3600",
+                                    "output_3600_4200"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[2, 6, 1])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[2, 3, 2])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[0, 3, 0])
+        self._check_y(output, child=3, spec=3, expected_bins=101, expected_values=[4, 2, 2])
+        self._check_y(output, child=4, spec=3, expected_bins=101, expected_values=[4, 1, 2])
+        self._check_y(output, child=5, spec=3, expected_bins=101, expected_values=[2, 1, 1])
+        self._check_y(output, child=6, spec=3, expected_bins=101, expected_values=[0, 0, 0])
+
+    def test_setting_time_interval_FilterByTime(self):
+        args = self._default_args
+        args['TimeInterval'] = 600
+        args['UseNewFilterAlgorithm'] = False
+        output = self._assert_run_algorithm_succeeds(args)
+        self._check_slices(output, ["output_0_600.0", "output_600.0_1200.0", "output_1200.0_1800.0",
+                                    "output_1800.0_2400.0", "output_2400.0_3000.0", "output_3000.0_3600.0"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[2, 6, 1])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[2, 3, 2])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[0, 3, 0])
+        self._check_y(output, child=3, spec=3, expected_bins=101, expected_values=[4, 2, 2])
+        self._check_y(output, child=4, spec=3, expected_bins=101, expected_values=[4, 1, 2])
+        self._check_y(output, child=5, spec=3, expected_bins=101, expected_values=[2, 1, 1])
 
     def test_setting_time_interval_and_limits(self):
         args = self._default_args
@@ -66,24 +87,134 @@ class ReflectometrySliceEventWorkspace(unittest.TestCase):
         args['StartTime'] = '1800'
         args['StopTime'] = '3300'
         output = self._assert_run_algorithm_succeeds(args)
-        self.assertEqual(output.getNumberOfEntries(), 3)
-        first_slice = output[0]
-        self.assertEqual(first_slice.dataX(0).size, 101)
-        self._assert_delta(first_slice.dataY(3)[0], 4)
-        self._assert_delta(first_slice.dataY(3)[51], 2)
-        self._assert_delta(first_slice.dataY(3)[99], 2)
+        self._check_slices(output, ["output_1800_2400", "output_2400_3000", "output_3000_3300"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[4, 2, 2])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[4, 1, 2])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[1, 1, 0])
+
+    def test_setting_time_interval_and_limits_FilterByTime(self):
+        args = self._default_args
+        args['TimeInterval'] = 600
+        args['StartTime'] = '1800'
+        args['StopTime'] = '3300'
+        args['UseNewFilterAlgorithm'] = False
+        output = self._assert_run_algorithm_succeeds(args)
+        # This filters up to 3600, which looks less correct than the new algorithm which cuts
+        # off at the requested 3300
+        self._check_slices(output, ["output_1800_2400.0", "output_2400.0_3000.0", "output_3000.0_3600.0"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[4, 2, 2])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[4, 1, 2])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[2, 1, 1])
+
+    def test_setting_multiple_time_intervals(self):
+        args = self._default_args
+        args['TimeInterval'] = '600, 1200'
+        args['StopTime'] = '3600'
+        output = self._assert_run_algorithm_succeeds(args)
+        self._check_slices(output, ["output_0_600", "output_600_1800", "output_1800_2400", "output_2400_3600"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[2, 6, 1])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[2, 6, 2])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[4, 2, 2])
+        self._check_y(output, child=3, spec=3, expected_bins=101, expected_values=[6, 2, 3])
+
+    def test_setting_multiple_time_intervals_is_not_implemented_for_FilterByTime(self):
+        args = self._default_args
+        args['TimeInterval'] = '600, 1200'
+        args['StopTime'] = '3600'
+        args['UseNewFilterAlgorithm'] = False
+        output = self._assert_run_algorithm_fails(args)
 
     def test_setting_log_interval_without_log_name_produces_single_slice(self):
         args = self._default_args
         args['LogValueInterval'] = 600
         output = self._assert_run_algorithm_succeeds(args)
-        self.assertEqual(output.getNumberOfEntries(), 1)
-        first_slice = output[0]
-        self.assertEqual(first_slice.getNumberHistograms(), 5)
-        self.assertEqual(first_slice.dataX(0).size, 101)
-        self._assert_delta(first_slice.dataY(3)[0], 14)
-        self._assert_delta(first_slice.dataY(3)[51], 16)
-        self._assert_delta(first_slice.dataY(3)[99], 8)
+        self._check_slices(output, ["output_0_4200"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[14, 16, 8])
+
+    def test_setting_log_interval_without_log_name_produces_single_slice_FilterByLogValue(self):
+        args = self._default_args
+        args['LogValueInterval'] = 600
+        args['UseNewFilterAlgorithm'] = False
+        output = self._assert_run_algorithm_succeeds(args)
+        self._check_slices(output, ["output_0_3600.0"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[14, 16, 8])
+
+    def test_setting_log_interval(self):
+        args = self._default_args
+        args['LogName'] = 'proton_charge'
+        args['LogValueInterval'] = 20
+        output = self._assert_run_algorithm_succeeds(args)
+        # Note that default tolerance is half the interval, so we slice +/-10 either side.
+        # Also note that empty slices are not included in the output.
+        self._check_slices(output, ["output_Log.proton_charge.From.10.To.30.Value-change-direction:both",
+                                    "output_Log.proton_charge.From.70.To.90.Value-change-direction:both",
+                                    "output_Log.proton_charge.From.90.To.110.Value-change-direction:both"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[4, 1, 2])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[4, 5, 2])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[6, 10, 4])
+
+    def test_setting_log_tolerance(self):
+        args = self._default_args
+        args['LogName'] = 'proton_charge'
+        args['LogValueInterval'] = 20
+        # Set tolerance to zero to give similar behaviour to FilterByLogValue, although
+        # note that empty slices are not output so we have fewer workspaces.
+        args['LogValueTolerance'] = 0
+        output = self._assert_run_algorithm_succeeds(args)
+        self._check_slices(output, ["output_Log.proton_charge.From.0.To.20.Value-change-direction:both",
+                                    "output_Log.proton_charge.From.80.To.100.Value-change-direction:both"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[4, 1, 2])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[4, 5, 2])
+
+    def test_setting_log_interval_FilterByLogValue(self):
+        args = self._default_args
+        args['LogName'] = 'proton_charge'
+        args['LogValueInterval'] = 20
+        args['MinimumLogValue'] = 0
+        args['MaximumLogValue'] = 100
+        args['UseNewFilterAlgorithm'] = False
+        output = self._assert_run_algorithm_succeeds(args)
+        self._check_slices(output, ["output_0.0_20.0", "output_20.0_40.0", "output_40.0_60.0",
+                                    "output_60.0_80.0", "output_80.0_100.0"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[0, 0, 0])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[0, 0, 0])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[0, 0, 0])
+        self._check_y(output, child=3, spec=3, expected_bins=101, expected_values=[0, 3, 0])
+        self._check_y(output, child=4, spec=3, expected_bins=101, expected_values=[4, 12, 3])
+
+    def test_setting_log_without_interval_produces_single_slice(self):
+        args = self._default_args
+        args['LogName'] = 'proton_charge'
+        output = self._assert_run_algorithm_succeeds(args)
+        self._check_slices(output, ["output_Log.proton_charge.From.0.To.100.Value-change-direction:both"])
+        # Note that the min/max log value are 0->100 (taken from the sample logs) but this is
+        # exclusive of values at 100 so excludes quite a few counts
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[8, 6, 4])
+
+    def test_setting_log_limits_without_interval_produces_single_slice(self):
+        args = self._default_args
+        args['LogName'] = 'proton_charge'
+        args['MinimumLogValue'] = 0
+        args['MaximumLogValue'] = 101
+        output = self._assert_run_algorithm_succeeds(args)
+        # We set the max to be over 100 to be inclusive of the values up to 100 so this includes all
+        # of the counts from the input workspace
+        self._check_slices(output, ["output_Log.proton_charge.From.0.To.101.Value-change-direction:both"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[14, 16, 8])
+
+    def test_setting_log_limits_without_interval_produces_single_slice_FilterByLogValue(self):
+        args = self._default_args
+        args['LogName'] = 'proton_charge'
+        args['MinimumLogValue'] = 0
+        args['MaximumLogValue'] = 101
+        args['UseNewFilterAlgorithm'] = False
+        output = self._assert_run_algorithm_succeeds(args)
+        self._check_slices(output, ["output_0.0_101.0"])
+        # These values don't seem right - I think they should contain all the counts from the
+        # input workspace, i.e. [14, 16, 8]. Adding this test though to confirm the current
+        # behaviour so we can check against it if we fix this in future. We may be phasing this
+        # algorithm out though so this is not currently a high priority.
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[12, 15, 7])
 
     def test_setting_log_interval_and_limits(self):
         args = self._default_args
@@ -92,41 +223,110 @@ class ReflectometrySliceEventWorkspace(unittest.TestCase):
         args['MinimumLogValue'] = '75'
         args['MaximumLogValue'] = '110'
         output = self._assert_run_algorithm_succeeds(args)
-        self.assertEqual(output.getNumberOfEntries(), 2)
-        first_slice = output[0]
-        self.assertEqual(first_slice.dataX(0).size, 101)
-        self._assert_delta(first_slice.dataY(3)[0], 4)
-        self._assert_delta(first_slice.dataY(3)[51], 5)
-        self._assert_delta(first_slice.dataY(3)[99], 2)
+        self._check_slices(output, ["output_Log.proton_charge.From.65.To.85.Value-change-direction:both",
+                                    "output_Log.proton_charge.From.85.To.105.Value-change-direction:both"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[4, 5, 2])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[6, 10, 4])
+
+    def test_setting_log_interval_and_limits_FilterByLogValue(self):
+        args = self._default_args
+        args['LogName'] = 'proton_charge'
+        args['LogValueInterval'] = 20
+        args['MinimumLogValue'] = '75'
+        args['MaximumLogValue'] = '110'
+        args['UseNewFilterAlgorithm'] = False
+        output = self._assert_run_algorithm_succeeds(args)
+        self._check_slices(output, ["output_75.0_95.0", "output_95.0_115.0"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[0, 3, 0])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[2, 6, 1])
 
     def test_when_input_is_a_workspace_group(self):
         args = self._default_args
         args['TimeInterval'] = 600
         args['InputWorkspace'] = 'input_ws_group'
-        output = self._assert_run_algorithm_succeeds(args)
-        self.assertEqual(output.getNumberOfEntries(), 3)
-        first_subgroup = output[0]
-        self.assertEqual(first_subgroup.getNumberOfEntries(), 7)
-        first_slice = first_subgroup[0]
-        self.assertEqual(first_slice.dataX(0).size, 101)
-        self._assert_delta(first_slice.dataY(3)[0], 2)
-        self._assert_delta(first_slice.dataY(3)[51], 6)
-        self._assert_delta(first_slice.dataY(3)[99], 1)
+        group = self._assert_run_algorithm_succeeds(args)
+        self.assertEqual(group.getNumberOfEntries(), 3)
+        output = group[0]
+        self._check_slices(output, ["ws1_monitor_ws_output_0_600",
+                                    "ws1_monitor_ws_output_600_1200",
+                                    "ws1_monitor_ws_output_1200_1800",
+                                    "ws1_monitor_ws_output_1800_2400",
+                                    "ws1_monitor_ws_output_2400_3000",
+                                    "ws1_monitor_ws_output_3000_3600",
+                                    "ws1_monitor_ws_output_3600_4200"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[2, 6, 1])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[2, 3, 2])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[0, 3, 0])
+        self._check_y(output, child=3, spec=3, expected_bins=101, expected_values=[4, 2, 2])
+        self._check_y(output, child=4, spec=3, expected_bins=101, expected_values=[4, 1, 2])
+        self._check_y(output, child=5, spec=3, expected_bins=101, expected_values=[2, 1, 1])
+        self._check_y(output, child=6, spec=3, expected_bins=101, expected_values=[0, 0, 0])
+
+    def test_when_input_is_a_workspace_group_FilterByTime(self):
+        args = self._default_args
+        args['TimeInterval'] = 600
+        args['InputWorkspace'] = 'input_ws_group'
+        args['UseNewFilterAlgorithm'] = False
+        group = self._assert_run_algorithm_succeeds(args)
+        self.assertEqual(group.getNumberOfEntries(), 3)
+        output = group[0]
+        self._check_slices(output, ["ws1_monitor_ws_output_0_600.0",
+                                    "ws1_monitor_ws_output_600.0_1200.0",
+                                    "ws1_monitor_ws_output_1200.0_1800.0",
+                                    "ws1_monitor_ws_output_1800.0_2400.0",
+                                    "ws1_monitor_ws_output_2400.0_3000.0",
+                                    "ws1_monitor_ws_output_3000.0_3600.0"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[2, 6, 1])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[2, 3, 2])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[0, 3, 0])
+        self._check_y(output, child=3, spec=3, expected_bins=101, expected_values=[4, 2, 2])
+        self._check_y(output, child=4, spec=3, expected_bins=101, expected_values=[4, 1, 2])
+        self._check_y(output, child=5, spec=3, expected_bins=101, expected_values=[2, 1, 1])
 
     def test_when_input_and_monitors_are_both_workspace_groups(self):
         args = self._default_args
         args['TimeInterval'] = 600
         args['InputWorkspace'] = 'input_ws_group'
         args['MonitorWorkspace'] = 'monitor_ws_group'
-        output = self._assert_run_algorithm_succeeds(args)
-        self.assertEqual(output.getNumberOfEntries(), 3)
-        first_subgroup = output[0]
-        self.assertEqual(first_subgroup.getNumberOfEntries(), 7)
-        first_slice = first_subgroup[0]
-        self.assertEqual(first_slice.dataX(0).size, 101)
-        self._assert_delta(first_slice.dataY(3)[0], 2)
-        self._assert_delta(first_slice.dataY(3)[51], 6)
-        self._assert_delta(first_slice.dataY(3)[99], 1)
+        group = self._assert_run_algorithm_succeeds(args)
+        self.assertEqual(group.getNumberOfEntries(), 3)
+        output = group[0]
+        self._check_slices(output, ["ws1_mon1_output_0_600",
+                                    "ws1_mon1_output_600_1200",
+                                    "ws1_mon1_output_1200_1800",
+                                    "ws1_mon1_output_1800_2400",
+                                    "ws1_mon1_output_2400_3000",
+                                    "ws1_mon1_output_3000_3600",
+                                    "ws1_mon1_output_3600_4200"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[2, 6, 1])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[2, 3, 2])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[0, 3, 0])
+        self._check_y(output, child=3, spec=3, expected_bins=101, expected_values=[4, 2, 2])
+        self._check_y(output, child=4, spec=3, expected_bins=101, expected_values=[4, 1, 2])
+        self._check_y(output, child=5, spec=3, expected_bins=101, expected_values=[2, 1, 1])
+        self._check_y(output, child=6, spec=3, expected_bins=101, expected_values=[0, 0, 0])
+
+    def test_when_input_and_monitors_are_both_workspace_groups_FilterByTime(self):
+        args = self._default_args
+        args['TimeInterval'] = 600
+        args['InputWorkspace'] = 'input_ws_group'
+        args['MonitorWorkspace'] = 'monitor_ws_group'
+        args['UseNewFilterAlgorithm'] = False
+        group = self._assert_run_algorithm_succeeds(args)
+        self.assertEqual(group.getNumberOfEntries(), 3)
+        output = group[0]
+        self._check_slices(output, ["ws1_mon1_output_0_600.0",
+                                    "ws1_mon1_output_600.0_1200.0",
+                                    "ws1_mon1_output_1200.0_1800.0",
+                                    "ws1_mon1_output_1800.0_2400.0",
+                                    "ws1_mon1_output_2400.0_3000.0",
+                                    "ws1_mon1_output_3000.0_3600.0"])
+        self._check_y(output, child=0, spec=3, expected_bins=101, expected_values=[2, 6, 1])
+        self._check_y(output, child=1, spec=3, expected_bins=101, expected_values=[2, 3, 2])
+        self._check_y(output, child=2, spec=3, expected_bins=101, expected_values=[0, 3, 0])
+        self._check_y(output, child=3, spec=3, expected_bins=101, expected_values=[4, 2, 2])
+        self._check_y(output, child=4, spec=3, expected_bins=101, expected_values=[4, 1, 2])
+        self._check_y(output, child=5, spec=3, expected_bins=101, expected_values=[2, 1, 1])
 
     def test_fails_when_input_groups_are_different_sizes(self):
         group = self._create_monitor_workspace_group_with_two_members()
@@ -160,7 +360,7 @@ class ReflectometrySliceEventWorkspace(unittest.TestCase):
 
     def _create_monitor_workspace(self):
         monitor_ws = CreateSampleWorkspace(OutputWorkspace='monitor_ws', NumBanks=0, NumMonitors=3,
-                                           BankPixelWidth=1, NumEvents=10000, Random=True)
+                                           BankPixelWidth=1, NumEvents=10000, Random=False)
         return monitor_ws
 
     def _create_monitor_workspace_group(self):
@@ -206,6 +406,20 @@ class ReflectometrySliceEventWorkspace(unittest.TestCase):
             throws = True
         self.assertEqual(throws, True)
 
+    def _check_slices(self, workspace_group, expected_names):
+        number_of_slices = workspace_group.getNumberOfEntries()
+        self.assertEqual(number_of_slices, len(expected_names))
+        for child in range(number_of_slices):
+            self.assertEqual(workspace_group[child].name(), expected_names[child])
+
+    def _check_y(self, workspace_group, child, spec, expected_bins, expected_values):
+        """Check the X size matches expected_bins and Y values for bins 0, 51 and 99 match expected_values"""
+        ws = workspace_group[child]
+        self.assertEqual(ws.dataX(spec).size, expected_bins)
+        self._assert_delta(ws.dataY(spec)[0], expected_values[0])
+        self._assert_delta(ws.dataY(spec)[51], expected_values[1])
+        self._assert_delta(ws.dataY(spec)[99], expected_values[2])
+
     def _assert_delta(self, value1, value2):
         self.assertEqual(round(value1, 6), round(value2, 6))
 
diff --git a/Testing/Data/SystemTest/INTER/ISISReflectometryEventTestRuns.nxs.md5 b/Testing/Data/SystemTest/INTER/ISISReflectometryEventTestRuns.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..6ca9f23c8244f24721e88d5e55a460f04a51c819
--- /dev/null
+++ b/Testing/Data/SystemTest/INTER/ISISReflectometryEventTestRuns.nxs.md5
@@ -0,0 +1 @@
+60769b9dee2504c0e42d1249ab410a10
diff --git a/Testing/Data/SystemTest/INTER/ISISReflectometryTestRuns.nxs.md5 b/Testing/Data/SystemTest/INTER/ISISReflectometryTestRuns.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..36e574f7801eb06d159a97ecc059259bd843f879
--- /dev/null
+++ b/Testing/Data/SystemTest/INTER/ISISReflectometryTestRuns.nxs.md5
@@ -0,0 +1 @@
+b97588f07efda1688f8f5ed841cbe610
diff --git a/Testing/SystemTests/tests/analysis/INTERReductionTest.py b/Testing/SystemTests/tests/analysis/INTERReductionTest.py
index 0178a2bde2cc17ea60fb675f2e1ce0a7eaf14aa9..2834509c7a650b837b6f6801c82d32e131d732cb 100644
--- a/Testing/SystemTests/tests/analysis/INTERReductionTest.py
+++ b/Testing/SystemTests/tests/analysis/INTERReductionTest.py
@@ -8,13 +8,11 @@
 System Test for ISIS Reflectometry reduction
 Adapted from scripts provided by Max Skoda.
 """
-from __future__ import (print_function)
+from ISISReflectometryWorkflowBase import *
 import systemtesting
-from mantid.simpleapi import *
-from mantid import ConfigService
 
 
-class INTERReductionTest(systemtesting.MantidSystemTest):
+class INTERReductionTest(systemtesting.MantidSystemTest, ISISReflectometryWorkflowBase):
     '''
     Mantid Test Script for INTER:
 
@@ -24,17 +22,15 @@ class INTERReductionTest(systemtesting.MantidSystemTest):
     3. Scripted fitting of reduced data for NRW
     4. Linear detector reduction
     '''
-    # Note: you may find the regenerate functions useful.
 
-    event_run_numbers = [45222]
     run_numbers = [45222, 45223, 45224, 44984, 44985, 44990, 44991]
-    first_transmission_run_names = ['45226', '44988', '44986']
-    second_transmission_run_names = ['45227', '44989', '44987']
+    event_run_numbers = [45222]
+    first_transmission_runs = ['45226', '44988', '44986']
+    second_transmission_runs = ['45227', '44989', '44987']
     transmission_workspace_names = ['TRANS', 'TRANS_SM', 'TRANS_NoSM']
-    runs_file = 'INTERReductionTestRuns.nxs'
-    runs_workspace = 'Runs'
-    reference_result_file = 'INTERReductionResult.nxs'
-    result_workspace = 'Result'
+    input_workspaces_file = 'INTERReductionTestRuns.nxs'
+    reference_file = 'INTERReductionResult.nxs'
+
     expected_fit_params={
         'Name': ['Theta', 'ScaleFactor', 'AirSLD', 'BulkSLD', 'Roughness', 'BackGround', 'Resolution',
                  'SLD_Layer0', 'd_Layer0', 'Rough_Layer0', 'Cost function value'],
@@ -52,111 +48,29 @@ class INTERReductionTest(systemtesting.MantidSystemTest):
         self.tolerance = 1e-6
 
     def requiredFiles(self):
-        return [self.reference_result_file, self.runs_file]
+        return [self.reference_file, self.input_workspaces_file]
 
     def validate(self):
-        return (self.result_workspace, self.reference_result_file)
+        return (self.result_workspace_name, self.reference_file)
 
     def runTest(self):
-        setupInstrument()
-        Load(self.runs_file, OutputWorkspace=self.runs_workspace)
-        workspaces_to_exclude_from_result = AnalysisDataService.Instance().getObjectNames()
-        createTransmissionWorkspaces(self.first_transmission_run_names,
-                                     self.second_transmission_run_names,
+        self.setupTest()
+        stitchTransmissionWorkspaces(self.first_transmission_runs,
+                                     self.second_transmission_runs,
                                      self.transmission_workspace_names)
-
         testEventDataTimeSlicing(self.event_run_numbers)
         testReductionOfThreeAngleFringedSolidLiquidExample([45222, 45223, 45224])
         testReductionOfTwoAngleAirLiquidExample([44984, 44985])
         testFittingOfReducedData(44990, 44991, self.expected_fit_params, self.expected_fit_covariance)
-
-        removeWorkspaces(workspaces_to_exclude_from_result)
-        GroupWorkspaces(InputWorkspaces=AnalysisDataService.Instance().getObjectNames(),
-                        OutputWorkspace=self.result_workspace)
-        mtd[self.result_workspace].sortByName()
-
-    @staticmethod
-    def regenerateRunsFile():
-        setupInstrument()
-        regenerateRunsFile(INTERReductionTest.first_transmission_run_names +
-                           INTERReductionTest.second_transmission_run_names,
-                           INTERReductionTest.run_numbers,
-                           INTERReductionTest.event_run_numbers)
+        self.finaliseResults()
 
     @staticmethod
     def regenerateReferenceFileByReducing():
         setupInstrument()
         test = INTERReductionTest()
         test.runTest()
-        SaveNexus(InputWorkspace=INTERReductionTest.result_workspace,
-                  Filename=INTERReductionTest.reference_result_file)
-
-    @staticmethod
-    def regenerateReferenceFileFromDirectory(reference_file_directory):
-        setupInstrument()
-        regenerateReferenceFile(reference_file_directory, INTERReductionTest.reference_result_file)
-
-
-def setupInstrument():
-    configI = ConfigService.Instance()
-    configI.setString("default.instrument", "INTER")
-    configI.setString("default.facility", "ISIS")
-
-
-def removeWorkspaces(to_remove):
-    for workspace_name in to_remove:
-        AnalysisDataService.Instance().remove(workspace_name)
-
-
-def workspaceName(file_path):
-    return os.path.splitext(os.path.basename(file_path))[0]
-
-
-def regenerateReferenceFile(reference_file_directory, output_filename):
-    '''Generate the reference file from a given folder of output workspaces'''
-    files = os.listdir(reference_file_directory)
-    workspace_names = []
-    for file in files:
-        workspace_name = WorkspaceName(file)
-        Load(file, OutputWorkspace=workspace_name)
-        workspace_names.append(workspace_name)
-
-    output_workspace_name = 'Output'
-    GroupWorkspaces(InputWorkspaces=workspace_names, OutputWorkspace=output_workspace_name)
-    mtd[output_workspace_name].sortByName()
-    SaveNexus(InputWorkspace=output_workspace_name, Filename=output_filename)
-
-
-def regenerateRunsFile(transmission_run_names, run_numbers, event_run_numbers):
-    '''Generate the test input file from a range of run numbers and transmission runs.'''
-    # Load transmission runs
-    for run in transmission_run_names:
-        Load('{}.raw'.format(run), OutputWorkspace=run)
-    # Load raw run files
-    run_names = [str(run_number)+'.raw' for run_number in run_numbers]
-    for run_name in run_names:
-        Load(run_name, OutputWorkspace=run_name)
-    # Load event workspaces
-    event_run_names = [str(event_run_number) for event_run_number in event_run_numbers]
-    for event_run_name in event_run_names:
-        LoadEventNexus(event_run_name, OutputWorkspace=event_run_name, LoadMonitors=True)
-    event_monitor_names = [str(run_number)+'_monitors' for run_number in event_run_numbers]
-    # Group and save
-    GroupWorkspaces(InputWorkspaces=run_names + transmission_run_names + event_run_names +
-                    event_monitor_names,
-                    OutputWorkspace='Input')
-    SaveNexus(InputWorkspace='Input', Filename='INTERReductionTestRuns.nxs')
-
-
-def createTransmissionWorkspaces(runs1, runs2, output_names):
-    '''Create a transmission workspace for each pair of input runs with the given output names'''
-    for run1, run2, name in zip(runs1, runs2, output_names):
-        CreateTransmissionWorkspaceAuto(
-            FirstTransmissionRun=run1,
-            SecondTransmissionRun=run2,
-            OutputWorkspace=name,
-            StartOverlap=10,
-            EndOverlap=12)
+        SaveNexus(InputWorkspace=INTERReductionTest.reference_workspace_name,
+                  Filename=INTERReductionTest.reference_file)
 
 
 def eventRef(run_number, angle, start=0, stop=0, DB='TRANS'):
@@ -189,10 +103,10 @@ def eventRef(run_number, angle, start=0, stop=0, DB='TRANS'):
     AppendSpectra(InputWorkspace1='mon_rebin', InputWorkspace2=slice_name,
                   OutputWorkspace=slice_name, MergeLogs=False)
     # Reduce this slice
-    ReflectometryReductionOneAuto(InputWorkspace=slice_name, FirstTransmissionRun=DB,
-                                  OutputWorkspaceBinned=slice_name+'_ref_binned',
-                                  OutputWorkspace=slice_name+'_ref',
-                                  OutputWorkspaceWavelength=slice_name+'_lam', Debug=True)
+    ReflectometryISISLoadAndProcess(InputRunList=slice_name, FirstTransmissionRunList=DB,
+                                    OutputWorkspaceBinned=slice_name+'_ref_binned',
+                                    OutputWorkspace=slice_name+'_ref',
+                                    OutputWorkspaceWavelength=slice_name+'_lam', Debug=True)
     # Delete interim workspaces
     DeleteWorkspace(slice_name+'_lam')
     DeleteWorkspace(slice_name)
@@ -201,14 +115,6 @@ def eventRef(run_number, angle, start=0, stop=0, DB='TRANS'):
     DeleteWorkspace('mon_rebin')
 
 
-def stitchedWorkspaceName(run1_number, run2_number):
-    '''Gets the name of the stitched workspace based on the two input workspace names'''
-    run1_name=str(run1_number)
-    run2_name=str(run2_number)
-    run2_short_name=run2_name[-2:]
-    return run1_name+'_'+run2_short_name
-
-
 def quickRef(run_numbers=[], trans_workspace_names=[], angles=[]):
     '''Perform reflectometry reduction on each input run, and stitch the
     reduced workspaces together'''
@@ -216,12 +122,12 @@ def quickRef(run_numbers=[], trans_workspace_names=[], angles=[]):
     for run_index in range(len(run_numbers)):
         # Set up the reduction properties
         run_name=str(run_numbers[run_index])
-        properties = {'InputWorkspace': run_name+'.raw',
-                      'FirstTransmissionRun': str(trans_workspace_names[run_index]),
+        properties = {'InputRunList': run_name+'.raw',
+                      'FirstTransmissionRunList': str(trans_workspace_names[run_index]),
                       'OutputWorkspaceBinned': run_name+'_IvsQ_binned',
                       'OutputWorkspace': run_name+'_IvsQ',
                       'OutputWorkspaceWavelength': run_name+'_IvsLam',
-                      'Debug':True}
+                      'Debug': True}
         # Set ThetaIn if the angles are given
         if angles:
             theta=angles[run_index]
@@ -230,7 +136,7 @@ def quickRef(run_numbers=[], trans_workspace_names=[], angles=[]):
             if theta == 0.8:
                 properties['WavelengthMin']=2.6
         # Do the reduction
-        ReflectometryReductionOneAuto(**properties)
+        ReflectometryISISLoadAndProcess(**properties)
         reduced_runs=reduced_runs+run_name+'_IvsQ_binned'
         if run_index < len(run_numbers)-1:
             reduced_runs=reduced_runs+','
@@ -299,7 +205,7 @@ def compareFitResults(results_dict, reference_dict, tolerance):
                                    "messages for details")
 
 
-def generateTimeSlices(run_number):
+def sliceAndReduceRun(run_number):
     '''Generate 60 second time slices of the given run, and perform reflectometry
     reduction on each slice'''
     for slice_index in range(5):
@@ -310,7 +216,7 @@ def generateTimeSlices(run_number):
 
 def testEventDataTimeSlicing(event_run_numbers):
     for run_number in event_run_numbers:
-        generateTimeSlices(run_number)
+        sliceAndReduceRun(run_number)
 
 
 def testReductionOfThreeAngleFringedSolidLiquidExample(run_numbers):
@@ -337,11 +243,3 @@ def testFittingOfReducedData(run1_number, run2_number, expected_fit_params, expe
     print('run ', str(run1_number))
     stitched_name=stitchedWorkspaceName(run1_number, run2_number)
     twoAngleFit(stitched_name, scalefactor, expected_fit_params, expected_fit_covariance)
-
-
-# If you want to re-run the test and save the result as a reference...
-#   INTERReductionTest.regenerateReferenceFileByReducing()
-
-# or
-# If you have workspaces in a folder to use as a reference...
-#   INTERReductionTest.regenerateReferenceFileFromDirectory("Path/To/Folder")
diff --git a/Testing/SystemTests/tests/analysis/ISISReflectometryAutoreductionTest.py b/Testing/SystemTests/tests/analysis/ISISReflectometryAutoreductionTest.py
index d1e68d805899d16566daeca2ac4b7cb50f6a5c23..39ff2717fb919c6a7410bc6d27e9e1b5ef20a640 100644
--- a/Testing/SystemTests/tests/analysis/ISISReflectometryAutoreductionTest.py
+++ b/Testing/SystemTests/tests/analysis/ISISReflectometryAutoreductionTest.py
@@ -8,172 +8,57 @@
 System Test for ISIS Reflectometry autoreduction
 Adapted from scripts provided by Max Skoda.
 """
+from ISISReflectometryWorkflowBase import *
 import re
 import itertools
 import math
 import systemtesting
 from operator import itemgetter
 from mantid.simpleapi import *
-from mantid import ConfigService
 from isis_reflectometry.combineMulti import combineDataMulti, getWorkspace
 
 
-class ISISReflectometryAutoreductionTest(systemtesting.MantidSystemTest):
+class ISISReflectometryAutoreductionTest(systemtesting.MantidSystemTest,
+                                         ISISReflectometryWorkflowBase):
     # NOTE: When updating the run range used be sure to update the run_titles table below.
-    # You may also find the regenerate functions useful.
     investigation_id = 1710262
     run_numbers = range(44319, 44349)
-    transmission_run_names = ['44297', '44296']
-    runs_file = 'ISISReflectometryAutoreductionTestRuns.nxs'
-    runs_workspace = 'Runs'
-    reference_result_file = 'ISISReflectometryAutoreductionResult.nxs'
-    result_workspace = 'Result'
+    first_transmission_runs = ['44297']
+    second_transmission_runs = ['44296']
+    input_workspaces_file = 'ISISReflectometryAutoreductionTestRuns.nxs'
+    reference_file = 'ISISReflectometryAutoreductionResult.nxs'
 
     def __init__(self):
         super(ISISReflectometryAutoreductionTest, self).__init__()
-        self.tolerance = 0.00000001
+        self.tolerance = 1e-6
 
     def requiredFiles(self):
-        return [self.reference_result_file, self.runs_file]
+        return [self.reference_file, self.input_workspaces_file]
 
     def validate(self):
-        return (self.result_workspace, self.reference_result_file)
+        return (self.result_workspace_name, self.reference_file)
 
     def runTest(self):
-        ConfigService.Instance().setString("default.instrument", "INTER")
-        Load(self.runs_file, OutputWorkspace=self.runs_workspace)
-        CreateTransmissionWorkspaces(self.transmission_run_names[0],
-                                     self.transmission_run_names[1],
-                                     scale=False)
-        workspaces_to_exclude_from_result = AnalysisDataService.Instance().getObjectNames()
-        stitched_name = StitchedTransmissionWorkspaceName(self.transmission_run_names[0], self.transmission_run_names[1])
-        Stitch1D(
-            LHSWorkspace=TransmissionWorkspaceName(self.transmission_run_names[0]),
-            RHSWorkspace=TransmissionWorkspaceName(self.transmission_run_names[1]),
-            StartOverlap=10,
-            EndOverlap=12,
-            ScaleRHSWorkspace=False,
-            OutputWorkspace=stitched_name)
-        AutoReduce([stitched_name, stitched_name],
-                   self.run_numbers)
-        RemoveWorkspaces(workspaces_to_exclude_from_result)
-        GroupWorkspaces(InputWorkspaces=AnalysisDataService.Instance().getObjectNames(),
-                        OutputWorkspace=self.result_workspace)
-        mtd[self.result_workspace].sortByName()
+        self.setupTest()
+        self.workspaces_to_exclude_from_result = AnalysisDataService.Instance().getObjectNames()
 
-    @staticmethod
-    def regenerateRunsFile():
-        RegenerateRunsFile(ISISReflectometryAutoreductionTest.transmission_run_names,
-                           ISISReflectometryAutoreductionTest.run_numbers)
+        stitched_name = stitchedTransmissionWorkspaceName(self.first_transmission_runs[0],
+                                                          self.second_transmission_runs[0])
+        stitchTransmissionWorkspaces(self.first_transmission_runs,
+                                     self.second_transmission_runs,
+                                     [stitched_name], False)
 
-    @staticmethod
-    def run():
-        test = ISISReflectometryAutoreductionTest()
-        test.runTest()
+        AutoReduce([stitched_name, stitched_name], self.run_numbers)
+
+        self.finaliseResults()
 
     @staticmethod
     def regenerateReferenceFileByReducing():
+        setupInstrument()
         test = ISISReflectometryAutoreductionTest()
         test.runTest()
-        SaveNexus(InputWorkspace=ISISReflectometryAutoreductionTest.result_workspace,
-                  Filename=ISISReflectometryAutoreductionTest.reference_result_file)
-
-    @staticmethod
-    def regenerateReferenceFileFromDirectory(reference_file_directory):
-        RegenerateReferenceFile(reference_file_directory, ISISReflectometryAutoreductionTest.reference_result_file)
-
-    @staticmethod
-    def regenerateRunTitles():
-        RegenerateRunTitles(ISISReflectometryAutoreductionTest.investigation_id)
-
-
-def RemoveWorkspaces(to_remove):
-    for workspace_name in to_remove:
-        AnalysisDataService.Instance().remove(workspace_name)
-
-
-def WorkspaceName(file_path):
-    return os.path.splitext(os.path.basename(file_path))[0]
-
-
-def RegenerateReferenceFile(reference_file_directory, output_filename):
-    files = os.listdir(reference_file_directory)
-    workspace_names = []
-    for file in files:
-        workspace_name = WorkspaceName(file)
-        Load(file, OutputWorkspace=workspace_name)
-        workspace_names.append(workspace_name)
-
-    output_workspace_name = 'Output'
-    GroupWorkspaces(InputWorkspaces=workspace_names, OutputWorkspace=output_workspace_name)
-    mtd[output_workspace_name].sortByName()
-    SaveNexus(InputWorkspace=output_workspace_name, Filename=output_filename)
-
-
-def RegenerateRunsFile(transmission_run_names, run_range):
-    "This is used to generate the test input file from a range of run numbers"
-    "and transmission runs."
-    from mantid.simpleapi import (Load, GroupWorkspaces)
-
-    for run in transmission_run_names:
-        Load('{}.raw'.format(run), OutputWorkspace=run)
-
-    run_names = [str(run_number) for run_number in run_range]
-    file_names = ["{}.raw".format(run_name) for run_name in run_names]
-
-    for run_name, file_name in zip(run_names, file_names):
-        Load(file_name, OutputWorkspace=run_name)
-
-    GroupWorkspaces(InputWorkspaces=run_names + transmission_run_names, OutputWorkspace='Input')
-    SaveNexus(InputWorkspace='Input', Filename='ISISReflectometryAutoreductionTestRuns.nxs')
-
-
-def RegenerateRunTitles(investigation_id):
-    "Uses the old reflectometry gui python modules to generate the runs table from ICAT."
-    "A local copy of the table generated is stored in run_titles below."
-    "You may be able to use this script to update it."
-    # self.listMain.clear()
-
-    # Use ICAT for a journal search based on the RB number
-
-    active_session_id = None
-    if CatalogManager.numberActiveSessions() == 0:
-        # Execute the CatalogLoginDialog
-        login_alg = CatalogLoginDialog()
-        session_object = login_alg.getProperty("KeepAlive").value
-        active_session_id = session_object.getPropertyValue("Session")
-
-    # Fetch out an existing session id
-    # This might be another catalog session, but at present there is
-    # no way to tell.
-    active_session_id = CatalogManager.getActiveSessions()[-1].getSessionId()
-
-    search_alg = AlgorithmManager.create('CatalogGetDataFiles')
-    search_alg.initialize()
-    search_alg.setChild(True)  # Keeps the results table out of the ADS
-    search_alg.setProperty('InvestigationId', str(investigation_id))
-    search_alg.setProperty('Session', active_session_id)
-    search_alg.setPropertyValue('OutputWorkspace', '_dummy')
-    search_alg.execute()
-    search_results = search_alg.getProperty('OutputWorkspace').value
-
-    # self.__icat_file_map = {}
-    # self.statusMain.clearMessage()
-    runlist = []
-    for row in search_results:
-        file_name = row['Name']
-        description = row['Description']
-        run_number = re.search(r'[1-9]\d+', file_name).group()
-
-        # Filter to only display and map raw files.
-        if bool(re.search('(raw)$', file_name, re.IGNORECASE)):
-            title = (run_number + '~ ' + description).strip()
-            runlist.append(title)
-    # self.SampleText.__icat_file_map[title] = #(file_id, run_number, file_name)
-    # self.listMain.addItem(title)
-    # self.listMain.sortItems()
-    return runlist
-    # del search_results
+        SaveNexus(InputWorkspace=self.reference_workspace_name,
+                  Filename=self.reference_file)
 
 
 run_titles = [
@@ -290,27 +175,27 @@ def AutoReduce(transRun=[], runRange=[], oldList=[]):
 
             if float(angle) > 0.0:
                 ws = str(runno)
-                # w1 = mtd[runno + '.raw']
-                # spectra = w1.getRun().getLogData('nspectra').value
                 if not mtd.doesExist(runno + '_IvsQ'):
                     th = angle
                     if len(transRun) > 1 and angle > 2.25:
                         wq, wq_binned = \
-                            ReflectometryReductionOneAuto(
-                                InputWorkspace=ws,
-                                FirstTransmissionRun=transRun[1],
+                            ReflectometryISISLoadAndProcess(
+                                InputRunList=ws,
+                                FirstTransmissionRunList=transRun[1],
                                 thetaIn=angle,
+                                StartOverlap=10,
+                                EndOverlap=12,
                                 OutputWorkspace=runno + '_IvsQ',
-                                OutputWorkspaceWavelength=runno + '_IvsLam',
                                 OutputWorkspaceBinned=runno + '_IvsQ_binned')
                     else:
                         wq, wqbinned = \
-                            ReflectometryReductionOneAuto(
-                                InputWorkspace=ws,
-                                FirstTransmissionRun=transRun[0],
+                            ReflectometryISISLoadAndProcess(
+                                InputRunList=ws,
+                                FirstTransmissionRunList=transRun[0],
                                 thetaIn=angle,
+                                StartOverlap=10,
+                                EndOverlap=12,
                                 OutputWorkspace=runno + '_IvsQ',
-                                OutputWorkspaceWavelength=runno + '_IvsLam',
                                 OutputWorkspaceBinned=runno + '_IvsQ_binned')
                 else:
                     wq = mtd[runno + '_IvsQ']
@@ -409,14 +294,6 @@ def SortRuns(tupsort):
     return sortedList
 
 
-def TransmissionWorkspaceName(run):
-    return "TRANS_{}".format(run)
-
-
-def StitchedTransmissionWorkspaceName(run_number_1, run_number_2):
-    return 'TRANS_{}_{}'.format(run_number_1, run_number_2)
-
-
 def CreateTransmissionWorkspaces(run1, run2, scale=False):
     CreateTransmissionWorkspaceAuto(
         run1,
@@ -428,10 +305,3 @@ def CreateTransmissionWorkspaces(run1, run2, scale=False):
         OutputWorkspace=TransmissionWorkspaceName(run2),
         StartOverlap=10,
         EndOverlap=12)
-
-# If you want to re-run the test and save the result as a reference...
-#   ISISReflectometryAutoreductionTest.regenerateReferenceFileByReducing()
-
-# or
-# If you have workspaces in a folder to use as a reference...
-#   ISISReflectometryAutoreductionTest.regenerateReferenceFileFromDirectory("Path/To/Folder")
diff --git a/Testing/SystemTests/tests/analysis/ISISReflectometryWorkflowBase.py b/Testing/SystemTests/tests/analysis/ISISReflectometryWorkflowBase.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef93458dc8352231afac1de32d02a60021104328
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/ISISReflectometryWorkflowBase.py
@@ -0,0 +1,224 @@
+# Mantid Repository : https://github.com/mantidproject/mantid
+#
+# Copyright &copy; 2019 ISIS Rutherford Appleton Laboratory UKRI,
+#     NScD Oak Ridge National Laboratory, European Spallation Source
+#     & Institut Laue - Langevin
+# SPDX - License - Identifier: GPL - 3.0 +
+"""
+System Test for ISIS Reflectometry reduction
+Adapted from scripts provided by Max Skoda.
+"""
+from __future__ import (print_function)
+import os
+import re
+from mantid.simpleapi import *
+from mantid import ConfigService
+
+
+class ISISReflectometryWorkflowBase():
+    '''
+    Base class for testing the ISIS Reflectometry workflow algorithms
+
+    You may find the regenerate functions useful:
+    - If you want to re-run the test and save the result as a reference...
+        <TESTSUITE>.regenerateReferenceFileByReducing()
+    - If you have workspaces in a folder to use as a reference...
+        <TESTSUITE>.regenerateReferenceFileFromDirectory("Path/To/Folder")
+    where <TESTSUITE> is the derived test class name.
+    '''
+
+    # Derived class should set these run numbers up as required
+    investigation_id = None
+    run_numbers = None
+    event_run_numbers = None
+    first_transmission_runs = None
+    second_transmission_runs = None
+    input_workspaces_file = None
+    reference_file = None
+
+    # Default names for input and output workspace groups
+    input_runs_workspace_name = 'Runs'
+    result_workspace_name = 'Result'
+
+    def setupTest(self):
+        '''Set up the instrument and any required workspaces ready for
+        the start of the test'''
+        setupInstrument()
+
+        if self.input_workspaces_file is not None:
+            Load(self.input_workspaces_file, OutputWorkspace=self.input_runs_workspace_name)
+
+        self.workspaces_to_exclude_from_result = AnalysisDataService.Instance().getObjectNames()
+
+    def finaliseResults(self):
+        '''Clear interim workspaces and group required outputs into the final
+        result ready for comparison with the reference'''
+        removeWorkspaces(self.workspaces_to_exclude_from_result)
+        GroupWorkspaces(InputWorkspaces=AnalysisDataService.Instance().getObjectNames(),
+                        OutputWorkspace=self.result_workspace_name)
+        mtd[self.result_workspace_name].sortByName()
+
+    def regenerateRunsFile(self):
+        setupInstrument()
+        regenerateRunsFile(self.first_transmission_runs + self.second_transmission_runs,
+                           self.run_numbers, self.event_run_numbers, self.input_workspaces_file)
+
+    def regenerateReferenceFileFromDirectory(self, reference_file_directory):
+        setupInstrument()
+        regenerateReferenceFile(reference_file_directory, self.reference_file)
+
+    def regenerateRunTitles(self):
+        RegenerateRunTitles(self.investigation_id)
+
+
+def setupInstrument():
+    configI = ConfigService.Instance()
+    configI.setString("default.instrument", "INTER")
+    configI.setString("default.facility", "ISIS")
+
+
+def removeWorkspaces(to_remove):
+    for workspace_name in to_remove:
+        AnalysisDataService.Instance().remove(workspace_name)
+
+
+def workspaceName(file_path):
+    return os.path.splitext(os.path.basename(file_path))[0]
+
+
+def stitchedWorkspaceName(run1_number, run2_number):
+    '''Gets the name of the stitched workspace based on the two input workspace names'''
+    run1_name=str(run1_number)
+    run2_name=str(run2_number)
+    run2_short_name=run2_name[-2:]
+    return run1_name+'_'+run2_short_name
+
+
+def transmissionWorkspaceName(run):
+    return "TRANS_{}".format(run)
+
+
+def stitchedTransmissionWorkspaceName(run_number_1, run_number_2):
+    return 'TRANS_{}_{}'.format(run_number_1, run_number_2)
+
+
+def stitchTransmissionWorkspaces(runs1, runs2, output_names, scaleRHSWorkspace=True):
+    '''Create a transmission workspace for each pair of input runs with the given output names'''
+    for run1, run2, name in zip(runs1, runs2, output_names):
+        CreateTransmissionWorkspaceAuto(
+            FirstTransmissionRun=run1,
+            SecondTransmissionRun=run2,
+            OutputWorkspace=name,
+            StartOverlap=10,
+            EndOverlap=12,
+            ScaleRHSWorkspace=scaleRHSWorkspace)
+
+
+def reduceRun(run_number, angle, first_transmission_runs = [], second_transmission_runs = [],
+              time_interval = None, suffix = '_IvsQ', debug = False):
+    ''' Perform reflectometry reduction on the run'''
+    run_name=str(run_number)
+    if time_interval is not None:
+        do_slicing = True
+    else:
+        do_slicing = False
+    # Reduce this run
+    ReflectometryISISLoadAndProcess(InputRunList=run_name, Debug=debug,
+                                    ProcessingInstructions='4',
+                                    StartOverlap=10, EndOverlap=12,
+                                    FirstTransmissionRunList=','.join(first_transmission_runs),
+                                    SecondTransmissionRunList=','.join(second_transmission_runs),
+                                    SliceWorkspace=do_slicing, TimeInterval=time_interval,
+                                    UseNewFilterAlgorithm=False,
+                                    OutputWorkspaceBinned=run_name + suffix + '_binned',
+                                    OutputWorkspace=run_name + suffix,
+                                    OutputWorkspaceWavelength=run_name + '_lam')
+    # Delete interim workspaces
+    DeleteWorkspace(run_name + '_lam')
+
+
+def regenerateReferenceFile(reference_file_directory, output_filename):
+    '''Generate the reference file from a given folder of output workspaces'''
+    files = os.listdir(reference_file_directory)
+    workspace_names = []
+    for file in files:
+        workspace_name = workspaceName(file)
+        Load(os.path.join(reference_file_directory, file), OutputWorkspace=workspace_name)
+        workspace_names.append(workspace_name)
+
+    output_workspace_name = 'Output'
+    GroupWorkspaces(InputWorkspaces=workspace_names, OutputWorkspace=output_workspace_name)
+    mtd[output_workspace_name].sortByName()
+    SaveNexus(InputWorkspace=output_workspace_name, Filename=output_filename)
+
+
+def regenerateRunsFile(transmission_run_names, run_numbers, event_run_numbers,
+                       input_workspaces_file):
+    '''Generate the test input file from a range of run numbers and transmission runs.'''
+    # Load transmission runs
+    for run in transmission_run_names:
+        Load('{}.raw'.format(run), OutputWorkspace=run)
+    # Load raw run files
+    run_names = [str(run_number) for run_number in run_numbers]
+    file_names = ["{}.raw".format(run_name) for run_name in run_names]
+
+    for run_name, file_name in zip(run_names, file_names):
+        Load(file_name, OutputWorkspace=run_name)
+    # Load event workspaces
+    event_run_names = [str(event_run_number) for event_run_number in event_run_numbers]
+    for event_run_name in event_run_names:
+        LoadEventNexus(event_run_name, OutputWorkspace=event_run_name, LoadMonitors=True)
+    event_monitor_names = [str(run_number)+'_monitors' for run_number in event_run_numbers]
+    # Group and save
+    GroupWorkspaces(InputWorkspaces=run_names + transmission_run_names + event_run_names +
+                    event_monitor_names,
+                    OutputWorkspace='Input')
+    SaveNexus(InputWorkspace='Input', Filename=input_workspaces_file)
+
+
+def RegenerateRunTitles(investigation_id):
+    """Uses the old reflectometry gui python modules to generate the runs table from ICAT.
+    A local copy of the table generated is stored in run_titles below.
+    You may be able to use this script to update it."""
+    # self.listMain.clear()
+
+    # Use ICAT for a journal search based on the RB number
+
+    active_session_id = None
+    if CatalogManager.numberActiveSessions() == 0:
+        # Execute the CatalogLoginDialog
+        login_alg = CatalogLoginDialog()
+        session_object = login_alg.getProperty("KeepAlive").value
+        active_session_id = session_object.getPropertyValue("Session")
+
+    # Fetch out an existing session id
+    # This might be another catalog session, but at present there is
+    # no way to tell.
+    active_session_id = CatalogManager.getActiveSessions()[-1].getSessionId()
+
+    search_alg = AlgorithmManager.create('CatalogGetDataFiles')
+    search_alg.initialize()
+    search_alg.setChild(True)  # Keeps the results table out of the ADS
+    search_alg.setProperty('InvestigationId', str(investigation_id))
+    search_alg.setProperty('Session', active_session_id)
+    search_alg.setPropertyValue('OutputWorkspace', '_dummy')
+    search_alg.execute()
+    search_results = search_alg.getProperty('OutputWorkspace').value
+
+    # self.__icat_file_map = {}
+    # self.statusMain.clearMessage()
+    runlist = []
+    for row in search_results:
+        file_name = row['Name']
+        description = row['Description']
+        run_number = re.search(r'[1-9]\d+', file_name).group()
+
+        # Filter to only display and map raw files.
+        if bool(re.search('(raw)$', file_name, re.IGNORECASE)):
+            title = (run_number + '~ ' + description).strip()
+            runlist.append(title)
+    # self.SampleText.__icat_file_map[title] = #(file_id, run_number, file_name)
+    # self.listMain.addItem(title)
+    # self.listMain.sortItems()
+    return runlist
+    # del search_results
diff --git a/Testing/SystemTests/tests/analysis/ISISReflectometryWorkflowPreprocessingTest.py b/Testing/SystemTests/tests/analysis/ISISReflectometryWorkflowPreprocessingTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a0d07a649edfbc42bd9612735ef64520f56dee2
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/ISISReflectometryWorkflowPreprocessingTest.py
@@ -0,0 +1,52 @@
+# Mantid Repository : https://github.com/mantidproject/mantid
+#
+# Copyright &copy; 2019 ISIS Rutherford Appleton Laboratory UKRI,
+#     NScD Oak Ridge National Laboratory, European Spallation Source
+#     & Institut Laue - Langevin
+# SPDX - License - Identifier: GPL - 3.0 +
+"""
+System Test for ISIS Reflectometry autoreduction
+Adapted from scripts provided by Max Skoda.
+"""
+from ISISReflectometryWorkflowBase import *
+import systemtesting
+
+
+class ISISReflectometryWorkflowPreprocessingTest(systemtesting.MantidSystemTest,
+                                                 ISISReflectometryWorkflowBase):
+    '''
+    Script to test that the ISIS Reflectometry workflow successfully performs
+    required preprocessing of input runs and transmission runs before it
+    performs the reduction, when those inputs are not already in the ADS.
+    '''
+
+    run_numbers = ['45222']
+    first_transmission_runs = ['45226']
+    second_transmission_runs = ['45227']
+    input_workspaces_file = 'ISISReflectometryTestRuns.nxs'
+    reference_file = 'ISISReflectometryReducedRunsResult.nxs'
+
+    def __init__(self):
+        super(ISISReflectometryWorkflowPreprocessingTest, self).__init__()
+        self.tolerance = 1e-6
+
+    def requiredFiles(self):
+        return [self.reference_file, self.input_workspaces_file]
+
+    def validate(self):
+        return (self.result_workspace_name, self.reference_file)
+
+    def runTest(self):
+        self.setupTest()
+        reduceRun(run_number = self.run_numbers[0], angle = 0.7,
+                  first_transmission_runs = self.first_transmission_runs,
+                  second_transmission_runs = self.second_transmission_runs)
+        self.finaliseResults()
+
+    @staticmethod
+    def regenerateReferenceFileByReducing():
+        setupInstrument()
+        test = ISISReflectometryWorkflowPreprocessingTest()
+        test.runTest()
+        SaveNexus(InputWorkspace=test.result_workspace_name,
+                  Filename=test.reference_file)
diff --git a/Testing/SystemTests/tests/analysis/ISISReflectometryWorkflowSlicingTest.py b/Testing/SystemTests/tests/analysis/ISISReflectometryWorkflowSlicingTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..0921f32bdbd1f7c97a4b73b23f0cd986f2c63f87
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/ISISReflectometryWorkflowSlicingTest.py
@@ -0,0 +1,50 @@
+# Mantid Repository : https://github.com/mantidproject/mantid
+#
+# Copyright &copy; 2019 ISIS Rutherford Appleton Laboratory UKRI,
+#     NScD Oak Ridge National Laboratory, European Spallation Source
+#     & Institut Laue - Langevin
+# SPDX - License - Identifier: GPL - 3.0 +
+from ISISReflectometryWorkflowBase import *
+import systemtesting
+
+
+class ISISReflectometryWorkflowSlicingTest(systemtesting.MantidSystemTest,
+                                           ISISReflectometryWorkflowBase):
+    '''
+    Test the ISIS Reflectometry workflow algorithms with event slicing
+    done internally in the workflow algorithm
+    '''
+    run_numbers = ['45222']
+    first_transmission_runs = ['45226']
+    second_transmission_runs = ['45227']
+    transmission_workspace_name = ['TRANS']
+    input_workspaces_file = 'ISISReflectometryEventTestRuns.nxs'
+    reference_file = 'ISISReflectometryWorkflowSlicingResult.nxs'
+
+    def __init__(self):
+        super(ISISReflectometryWorkflowSlicingTest, self).__init__()
+        self.tolerance = 1e-6
+
+    def requiredFiles(self):
+        return [self.reference_file, self.input_workspaces_file]
+
+    def validate(self):
+        return (self.result_workspace_name, self.reference_file)
+
+    def runTest(self):
+        self.setupTest()
+        reduceRun(self.run_numbers[0], 0.5, self.first_transmission_runs,
+                  self.second_transmission_runs, time_interval=60)
+        # Delete the interim transmission workspaces. These are currently output
+        # for input groups (i.e. when we're slicing) even when Debug is not on.
+        DeleteWorkspace('TRANS_LAM_45226')
+        DeleteWorkspace('TRANS_LAM_45227')
+        self.finaliseResults()
+
+    @staticmethod
+    def regenerateReferenceFileByReducing():
+        setupInstrument()
+        test = ISISReflectometryWorkflowSlicingTest()
+        test.runTest()
+        SaveNexus(InputWorkspace=test.result_workspace_name,
+                  Filename=test.reference_file)
diff --git a/Testing/SystemTests/tests/analysis/reference/ISISReflectometryReducedRunsResult.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/ISISReflectometryReducedRunsResult.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..224426b6ad5bd56e0275702df0cf67ce1615fa8d
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/ISISReflectometryReducedRunsResult.nxs.md5
@@ -0,0 +1 @@
+050d866089735a3107558c0effcfaae3
diff --git a/Testing/SystemTests/tests/analysis/reference/ISISReflectometryWorkflowSlicingResult.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/ISISReflectometryWorkflowSlicingResult.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..4d67650e5224e0e406c25148e83bc47c078b6ce5
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/ISISReflectometryWorkflowSlicingResult.nxs.md5
@@ -0,0 +1 @@
+6749ec6060b1e697e77ad29c9dc2ce4a