diff --git a/Framework/API/inc/MantidAPI/DetectorInfo.h b/Framework/API/inc/MantidAPI/DetectorInfo.h
index d338c54f401e05d78a444052c01892dd9e58fcb1..d61e17fed91b50f1c110dab531e92248e14f90a5 100644
--- a/Framework/API/inc/MantidAPI/DetectorInfo.h
+++ b/Framework/API/inc/MantidAPI/DetectorInfo.h
@@ -2,6 +2,7 @@
 #define MANTID_API_DETECTORINFO_H_
 
 #include "MantidAPI/DllConfig.h"
+#include "MantidKernel/DateAndTime.h"
 #include "MantidKernel/Quat.h"
 #include "MantidKernel/V3D.h"
 
@@ -76,18 +77,30 @@ public:
   size_t size() const;
 
   bool isMonitor(const size_t index) const;
+  bool isMonitor(const std::pair<size_t, size_t> &index) const;
   bool isMasked(const size_t index) const;
+  bool isMasked(const std::pair<size_t, size_t> &index) const;
   double l2(const size_t index) const;
+  double l2(const std::pair<size_t, size_t> &index) const;
   double twoTheta(const size_t index) const;
+  double twoTheta(const std::pair<size_t, size_t> &index) const;
   double signedTwoTheta(const size_t index) const;
+  double signedTwoTheta(const std::pair<size_t, size_t> &index) const;
   Kernel::V3D position(const size_t index) const;
+  Kernel::V3D position(const std::pair<size_t, size_t> &index) const;
   Kernel::Quat rotation(const size_t index) const;
+  Kernel::Quat rotation(const std::pair<size_t, size_t> &index) const;
 
   void setMasked(const size_t index, bool masked);
+  void setMasked(const std::pair<size_t, size_t> &index, bool masked);
   void clearMaskFlags();
 
   void setPosition(const size_t index, const Kernel::V3D &position);
+  void setPosition(const std::pair<size_t, size_t> &index,
+                   const Kernel::V3D &position);
   void setRotation(const size_t index, const Kernel::Quat &rotation);
+  void setRotation(const std::pair<size_t, size_t> &index,
+                   const Kernel::Quat &rotation);
 
   void setPosition(const Geometry::IComponent &comp, const Kernel::V3D &pos);
   void setRotation(const Geometry::IComponent &comp, const Kernel::Quat &rot);
@@ -105,6 +118,15 @@ public:
   /// This will throw an out of range exception if the detector does not exist.
   size_t indexOf(const detid_t id) const { return m_detIDToIndex.at(id); }
 
+  size_t scanCount(const size_t index) const;
+  std::pair<Kernel::DateAndTime, Kernel::DateAndTime>
+  scanInterval(const std::pair<size_t, size_t> &index) const;
+  void setScanInterval(
+      const size_t index,
+      const std::pair<Kernel::DateAndTime, Kernel::DateAndTime> &interval);
+
+  void merge(const DetectorInfo &other);
+
   friend class SpectrumInfo;
 
 private:
diff --git a/Framework/API/inc/MantidAPI/SpectrumInfo.h b/Framework/API/inc/MantidAPI/SpectrumInfo.h
index e0762a4a239001091ce5eff4175d85f239fdd0dd..d14718cd4bb287a8ed4bf5f40abaa46444d962b6 100644
--- a/Framework/API/inc/MantidAPI/SpectrumInfo.h
+++ b/Framework/API/inc/MantidAPI/SpectrumInfo.h
@@ -103,7 +103,8 @@ public:
 
 private:
   const Geometry::IDetector &getDetector(const size_t index) const;
-  std::vector<size_t> getDetectorIndices(const size_t index) const;
+  const SpectrumDefinition &
+  checkAndGetSpectrumDefinition(const size_t index) const;
 
   const ExperimentInfo &m_experimentInfo;
   DetectorInfo &m_detectorInfo;
diff --git a/Framework/API/src/DetectorInfo.cpp b/Framework/API/src/DetectorInfo.cpp
index 17e8bc99ceba9082fbb099b75a48d11260f7639a..867193aea5f17f070767e99f0fb425beb4a76642 100644
--- a/Framework/API/src/DetectorInfo.cpp
+++ b/Framework/API/src/DetectorInfo.cpp
@@ -70,11 +70,21 @@ bool DetectorInfo::isMonitor(const size_t index) const {
   return m_detectorInfo.isMonitor(index);
 }
 
+/// Returns true if the detector is a monitor.
+bool DetectorInfo::isMonitor(const std::pair<size_t, size_t> &index) const {
+  return m_detectorInfo.isMonitor(index);
+}
+
 /// Returns true if the detector is masked.
 bool DetectorInfo::isMasked(const size_t index) const {
   return m_detectorInfo.isMasked(index);
 }
 
+/// Returns true if the detector is masked.
+bool DetectorInfo::isMasked(const std::pair<size_t, size_t> &index) const {
+  return m_detectorInfo.isMasked(index);
+}
+
 /** Returns L2 (distance from sample to spectrum).
  *
  * For monitors this is defined such that L1+L2 = source-detector distance,
@@ -87,6 +97,18 @@ double DetectorInfo::l2(const size_t index) const {
     return position(index).distance(sourcePosition()) - l1();
 }
 
+/** Returns L2 (distance from sample to spectrum).
+ *
+ * For monitors this is defined such that L1+L2 = source-detector distance,
+ * i.e., for a monitor in the beamline between source and sample L2 is negative.
+ */
+double DetectorInfo::l2(const std::pair<size_t, size_t> &index) const {
+  if (!isMonitor(index))
+    return position(index).distance(samplePosition());
+  else
+    return position(index).distance(sourcePosition()) - l1();
+}
+
 /// Returns 2 theta (scattering angle w.r.t. to beam direction).
 double DetectorInfo::twoTheta(const size_t index) const {
   if (isMonitor(index))
@@ -105,6 +127,24 @@ double DetectorInfo::twoTheta(const size_t index) const {
   return sampleDetVec.angle(beamLine);
 }
 
+/// Returns 2 theta (scattering angle w.r.t. to beam direction).
+double DetectorInfo::twoTheta(const std::pair<size_t, size_t> &index) const {
+  if (isMonitor(index))
+    throw std::logic_error(
+        "Two theta (scattering angle) is not defined for monitors.");
+
+  const auto samplePos = samplePosition();
+  const auto beamLine = samplePos - sourcePosition();
+
+  if (beamLine.nullVector()) {
+    throw Kernel::Exception::InstrumentDefinitionError(
+        "Source and sample are at same position!");
+  }
+
+  const auto sampleDetVec = position(index) - samplePos;
+  return sampleDetVec.angle(beamLine);
+}
+
 /// Returns signed 2 theta (signed scattering angle w.r.t. to beam direction).
 double DetectorInfo::signedTwoTheta(const size_t index) const {
   if (isMonitor(index))
@@ -133,21 +173,68 @@ double DetectorInfo::signedTwoTheta(const size_t index) const {
   return angle;
 }
 
+/// Returns signed 2 theta (signed scattering angle w.r.t. to beam direction).
+double
+DetectorInfo::signedTwoTheta(const std::pair<size_t, size_t> &index) const {
+  if (isMonitor(index))
+    throw std::logic_error(
+        "Two theta (scattering angle) is not defined for monitors.");
+
+  const auto samplePos = samplePosition();
+  const auto beamLine = samplePos - sourcePosition();
+
+  if (beamLine.nullVector()) {
+    throw Kernel::Exception::InstrumentDefinitionError(
+        "Source and sample are at same position!");
+  }
+  // Get the instrument up axis.
+  const auto &instrumentUpAxis =
+      m_instrument->getReferenceFrame()->vecPointingUp();
+
+  const auto sampleDetVec = position(index) - samplePos;
+  double angle = sampleDetVec.angle(beamLine);
+
+  const auto cross = beamLine.cross_prod(sampleDetVec);
+  const auto normToSurface = beamLine.cross_prod(instrumentUpAxis);
+  if (normToSurface.scalar_prod(cross) < 0) {
+    angle *= -1;
+  }
+  return angle;
+}
+
 /// Returns the position of the detector with given index.
 Kernel::V3D DetectorInfo::position(const size_t index) const {
   return Kernel::toV3D(m_detectorInfo.position(index));
 }
 
+/// Returns the position of the detector with given index.
+Kernel::V3D
+DetectorInfo::position(const std::pair<size_t, size_t> &index) const {
+  return Kernel::toV3D(m_detectorInfo.position(index));
+}
+
 /// Returns the rotation of the detector with given index.
 Kernel::Quat DetectorInfo::rotation(const size_t index) const {
   return Kernel::toQuat(m_detectorInfo.rotation(index));
 }
 
+/// Returns the rotation of the detector with given index.
+Kernel::Quat
+DetectorInfo::rotation(const std::pair<size_t, size_t> &index) const {
+  return Kernel::toQuat(m_detectorInfo.rotation(index));
+}
+
 /// Set the mask flag of the detector with given index. Not thread safe.
 void DetectorInfo::setMasked(const size_t index, bool masked) {
   m_detectorInfo.setMasked(index, masked);
 }
 
+/// Set the mask flag of the detector with given index. Not thread safe.
+void DetectorInfo::setMasked(const std::pair<size_t, size_t> &index,
+                             bool masked) {
+  m_detectorInfo.setMasked(index, masked);
+}
+
 /** Sets all mask flags to false (unmasked). Not thread safe.
  *
  * This method was introduced to help with refactoring and may be removed in the
@@ -163,12 +250,24 @@ void DetectorInfo::setPosition(const size_t index,
   m_detectorInfo.setPosition(index, Kernel::toVector3d(position));
 }
 
+/// Set the absolute position of the detector with given index. Not thread safe.
+void DetectorInfo::setPosition(const std::pair<size_t, size_t> &index,
+                               const Kernel::V3D &position) {
+  m_detectorInfo.setPosition(index, Kernel::toVector3d(position));
+}
+
 /// Set the absolute rotation of the detector with given index. Not thread safe.
 void DetectorInfo::setRotation(const size_t index,
                                const Kernel::Quat &rotation) {
   m_detectorInfo.setRotation(index, Kernel::toQuaterniond(rotation));
 }
 
+/// Set the absolute rotation of the detector with given index. Not thread safe.
+void DetectorInfo::setRotation(const std::pair<size_t, size_t> &index,
+                               const Kernel::Quat &rotation) {
+  m_detectorInfo.setRotation(index, Kernel::toQuaterniond(rotation));
+}
+
 /** Set the absolute position of the component `comp`.
  *
  * This may or may not be a detector. Even if it is not a detector it will
@@ -180,6 +279,11 @@ void DetectorInfo::setPosition(const Geometry::IComponent &comp,
     const auto index = indexOf(det->getID());
     setPosition(index, pos);
   } else {
+    const auto &detIndices = getAssemblyDetectorIndices(comp);
+    if ((!detIndices.empty()) && m_detectorInfo.isScanning())
+      throw std::runtime_error("Cannot move parent component containing "
+                               "detectors since the beamline has "
+                               "time-dependent (moving) detectors.");
     // This will go badly wrong if the parameter map in the component is not
     // identical to ours, but there does not seem to be a way to check?
     const auto oldPos = comp.getPos();
@@ -189,7 +293,6 @@ void DetectorInfo::setPosition(const Geometry::IComponent &comp,
     // If comp is a detector cached positions stay valid. In all other cases
     // (higher level in instrument tree, or other leaf component such as sample
     // or source) we flush all cached positions.
-    const auto &detIndices = getAssemblyDetectorIndices(comp);
     if (detIndices.size() == 0 || detIndices.size() == size()) {
       // Update only if comp is not a bank (not detectors) or the full
       // instrument (all detectors). The should make this thread-safe for
@@ -222,6 +325,11 @@ void DetectorInfo::setRotation(const Geometry::IComponent &comp,
     const auto index = indexOf(det->getID());
     setRotation(index, rot);
   } else {
+    const auto &detIndices = getAssemblyDetectorIndices(comp);
+    if ((!detIndices.empty()) && m_detectorInfo.isScanning())
+      throw std::runtime_error("Cannot move parent component containing "
+                               "detectors since the beamline has "
+                               "time-dependent (moving) detectors.");
     // This will go badly wrong if the parameter map in the component is not
     // identical to ours, but there does not seem to be a way to check?
     const auto pos = toVector3d(comp.getPos());
@@ -234,7 +342,6 @@ void DetectorInfo::setRotation(const Geometry::IComponent &comp,
     // If comp is a detector cached positions and rotations stay valid. In all
     // other cases (higher level in instrument tree, or other leaf component
     // such as sample or source) we flush all cached positions and rotations.
-    const auto &detIndices = getAssemblyDetectorIndices(comp);
     if (detIndices.size() == 0 || detIndices.size() == size()) {
       // Update only if comp is not a bank (not detectors) or the full
       // instrument (all detectors). The should make this thread-safe for
@@ -289,6 +396,44 @@ const std::vector<detid_t> &DetectorInfo::detectorIDs() const {
   return m_detectorIDs;
 }
 
+/// Returns the scan count of the detector with given detector index.
+size_t DetectorInfo::scanCount(const size_t index) const {
+  return m_detectorInfo.scanCount(index);
+}
+
+/** Returns the scan interval of the detector with given index.
+ *
+ * The interval start and end values would typically correspond to nanoseconds
+ * since 1990, as in Kernel::DateAndTime. */
+std::pair<Kernel::DateAndTime, Kernel::DateAndTime>
+DetectorInfo::scanInterval(const std::pair<size_t, size_t> &index) const {
+  const auto &interval = m_detectorInfo.scanInterval(index);
+  return {interval.first, interval.second};
+}
+
+/** Set the scan interval of the detector with given detector index.
+ *
+ * The interval start and end values would typically correspond to nanoseconds
+ * since 1990, as in Kernel::DateAndTime. Note that it is currently not possible
+ * to modify scan intervals for a DetectorInfo with time-dependent detectors,
+ * i.e., time intervals must be set with this method before merging individual
+ * scans. */
+void DetectorInfo::setScanInterval(
+    const size_t index,
+    const std::pair<Kernel::DateAndTime, Kernel::DateAndTime> &interval) {
+  m_detectorInfo.setScanInterval(index, {interval.first.totalNanoseconds(),
+                                         interval.second.totalNanoseconds()});
+}
+
+/** Merges the contents of other into this.
+ *
+ * Scan intervals in both other and this must be set. Intervals must be
+ * identical or non-overlapping. If they are identical all other parameters (for
+ * that index) must match. */
+void DetectorInfo::merge(const DetectorInfo &other) {
+  m_detectorInfo.merge(other.m_detectorInfo);
+}
+
 const Geometry::IDetector &DetectorInfo::getDetector(const size_t index) const {
   size_t thread = static_cast<size_t>(PARALLEL_THREAD_NUMBER);
   if (m_lastIndex[thread] != index) {
diff --git a/Framework/API/src/IFunction.cpp b/Framework/API/src/IFunction.cpp
index 0a4d3a020ab4dc725bef0489fea15e9d8740c641..ec139871980858eda5c7856d85e716d5f0dfd6af 100644
--- a/Framework/API/src/IFunction.cpp
+++ b/Framework/API/src/IFunction.cpp
@@ -206,9 +206,7 @@ std::string IFunction::asString() const {
   }
   // print the parameters
   for (size_t i = 0; i < nParams(); i++) {
-    if (!isFixed(i)) {
-      ostr << ',' << parameterName(i) << '=' << getParameter(i);
-    }
+    ostr << ',' << parameterName(i) << '=' << getParameter(i);
   }
 
   // collect non-default constraints
diff --git a/Framework/API/src/SpectrumInfo.cpp b/Framework/API/src/SpectrumInfo.cpp
index d5e134e079c918f1d59ed269f0adc2e4f6f724f9..e2f8da502d8a203d21740515b8d3569f453a4173 100644
--- a/Framework/API/src/SpectrumInfo.cpp
+++ b/Framework/API/src/SpectrumInfo.cpp
@@ -42,7 +42,7 @@ SpectrumInfo::sharedSpectrumDefinitions() const {
 
 /// Returns true if the detector(s) associated with the spectrum are monitors.
 bool SpectrumInfo::isMonitor(const size_t index) const {
-  for (const auto detIndex : getDetectorIndices(index))
+  for (const auto &detIndex : checkAndGetSpectrumDefinition(index))
     if (!m_detectorInfo.isMonitor(detIndex))
       return false;
   return true;
@@ -51,7 +51,7 @@ bool SpectrumInfo::isMonitor(const size_t index) const {
 /// Returns true if the detector(s) associated with the spectrum are masked.
 bool SpectrumInfo::isMasked(const size_t index) const {
   bool masked = true;
-  for (const auto detIndex : getDetectorIndices(index))
+  for (const auto &detIndex : checkAndGetSpectrumDefinition(index))
     masked &= m_detectorInfo.isMasked(detIndex);
   return masked;
 }
@@ -63,7 +63,7 @@ bool SpectrumInfo::isMasked(const size_t index) const {
  */
 double SpectrumInfo::l2(const size_t index) const {
   double l2{0.0};
-  for (const auto detIndex : getDetectorIndices(index))
+  for (const auto &detIndex : checkAndGetSpectrumDefinition(index))
     l2 += m_detectorInfo.l2(detIndex);
   return l2 / static_cast<double>(spectrumDefinition(index).size());
 }
@@ -75,7 +75,7 @@ double SpectrumInfo::l2(const size_t index) const {
  */
 double SpectrumInfo::twoTheta(const size_t index) const {
   double twoTheta{0.0};
-  for (const auto detIndex : getDetectorIndices(index))
+  for (const auto &detIndex : checkAndGetSpectrumDefinition(index))
     twoTheta += m_detectorInfo.twoTheta(detIndex);
   return twoTheta / static_cast<double>(spectrumDefinition(index).size());
 }
@@ -87,7 +87,7 @@ double SpectrumInfo::twoTheta(const size_t index) const {
  */
 double SpectrumInfo::signedTwoTheta(const size_t index) const {
   double signedTwoTheta{0.0};
-  for (const auto detIndex : getDetectorIndices(index))
+  for (const auto &detIndex : checkAndGetSpectrumDefinition(index))
     signedTwoTheta += m_detectorInfo.signedTwoTheta(detIndex);
   return signedTwoTheta / static_cast<double>(spectrumDefinition(index).size());
 }
@@ -95,7 +95,7 @@ double SpectrumInfo::signedTwoTheta(const size_t index) const {
 /// Returns the position of the spectrum with given index.
 Kernel::V3D SpectrumInfo::position(const size_t index) const {
   Kernel::V3D newPos;
-  for (const auto detIndex : getDetectorIndices(index))
+  for (const auto &detIndex : checkAndGetSpectrumDefinition(index))
     newPos += m_detectorInfo.position(detIndex);
   return newPos / static_cast<double>(spectrumDefinition(index).size());
 }
@@ -118,7 +118,7 @@ bool SpectrumInfo::hasUniqueDetector(const size_t index) const {
  *
  * Currently this simply sets the mask flags for the underlying detectors. */
 void SpectrumInfo::setMasked(const size_t index, bool masked) {
-  for (const auto detIndex : getDetectorIndices(index))
+  for (const auto &detIndex : checkAndGetSpectrumDefinition(index))
     m_detectorInfo.setMasked(detIndex, masked);
 }
 
@@ -174,14 +174,12 @@ const Geometry::IDetector &SpectrumInfo::getDetector(const size_t index) const {
   return *m_lastDetector[thread];
 }
 
-std::vector<size_t> SpectrumInfo::getDetectorIndices(const size_t index) const {
-  std::vector<size_t> detIndices;
-  for (const auto &def : spectrumDefinition(index))
-    detIndices.push_back(def.first);
-  if (detIndices.empty())
+const SpectrumDefinition &
+SpectrumInfo::checkAndGetSpectrumDefinition(const size_t index) const {
+  if (spectrumDefinition(index).size() == 0)
     throw Kernel::Exception::NotFoundError(
         "SpectrumInfo: No detectors for this workspace index.", "");
-  return detIndices;
+  return spectrumDefinition(index);
 }
 
 } // namespace API
diff --git a/Framework/API/test/FunctionFactoryTest.h b/Framework/API/test/FunctionFactoryTest.h
index 83a34d7040d8aa71ebbca7131a9527e74aeaaab0..89225c19ab277d26fae3436bd578f76778a2c220 100644
--- a/Framework/API/test/FunctionFactoryTest.h
+++ b/Framework/API/test/FunctionFactoryTest.h
@@ -433,9 +433,9 @@ public:
   void test_MultiDomainFunction_creation_moreComplex() {
     const std::string fnString =
         "composite=MultiDomainFunction,NumDeriv=true;(name=FunctionFactoryTest_"
-        "FunctA,a0=0,a1=0.5;name=FunctionFactoryTest_FunctB,b0=0.1,ties="
+        "FunctA,a0=0,a1=0.5;name=FunctionFactoryTest_FunctB,b0=0.1,b1=0.2,ties="
         "(b1=0.2),$domains=i);(name=FunctionFactoryTest_FunctA,a0=0,a1=0.5;"
-        "name=FunctionFactoryTest_FunctB,b0=0.1,$domains=i);ties=(f1.f1."
+        "name=FunctionFactoryTest_FunctB,b0=0.1,b1=0.2,$domains=i);ties=(f1.f1."
         "b1=f0.f1.b1)";
     IFunction_sptr fun;
     TS_ASSERT_THROWS_NOTHING(
diff --git a/Framework/API/test/ImmutableCompositeFunctionTest.h b/Framework/API/test/ImmutableCompositeFunctionTest.h
index aa130916b7166c9361a286667c349a0242c67e25..e15872611464696244eb74df62828001e803c68d 100644
--- a/Framework/API/test/ImmutableCompositeFunctionTest.h
+++ b/Framework/API/test/ImmutableCompositeFunctionTest.h
@@ -279,10 +279,9 @@ public:
     icf.addTies("b2=b1,a2=a1/5");
     icf.applyTies();
 
-    TS_ASSERT_EQUALS(
-        icf.asString(),
-        "name=ImmutableCompositeFunctionTest_"
-        "Function,NumDeriv=false,a1=11,b1=12,ties=(a2=a1/5,b2=b1)");
+    TS_ASSERT_EQUALS(icf.asString(), "name=ImmutableCompositeFunctionTest_"
+                                     "Function,NumDeriv=false,a1=11,b1=12,a2=2."
+                                     "2,b2=12,ties=(a2=a1/5,b2=b1)");
 
     auto fun = FunctionFactory::Instance().createInitialized(icf.asString());
     TS_ASSERT(fun);
@@ -309,7 +308,7 @@ public:
 
     TS_ASSERT_EQUALS(icf.asString(), "name=ImmutableCompositeFunctionTest_"
                                      "FunctionWithTies,NumDeriv=false,a1=1,b1="
-                                     "2");
+                                     "2,a2=0.25,b2=1");
 
     auto fun = FunctionFactory::Instance().createInitialized(icf.asString());
     TS_ASSERT(fun);
diff --git a/Framework/API/test/MatrixWorkspaceTest.h b/Framework/API/test/MatrixWorkspaceTest.h
index e614b0ce668d62ea532f8f2f271d2c5254436eda..766b90592fe3d2f9964bb3a0d06bc374e6dec658 100644
--- a/Framework/API/test/MatrixWorkspaceTest.h
+++ b/Framework/API/test/MatrixWorkspaceTest.h
@@ -1,6 +1,7 @@
 #ifndef WORKSPACETEST_H_
 #define WORKSPACETEST_H_
 
+#include "MantidAPI/DetectorInfo.h"
 #include "MantidAPI/ISpectrum.h"
 #include "MantidAPI/MatrixWorkspace.h"
 #include "MantidAPI/NumericAxis.h"
@@ -59,6 +60,8 @@ boost::shared_ptr<MatrixWorkspace> makeWorkspaceWithDetectors(size_t numSpectra,
   for (size_t i = 0; i < ws2->getNumberHistograms(); ++i) {
     // Create a detector for each spectra
     Detector *det = new Detector("pixel", static_cast<detid_t>(i), inst.get());
+    det->setShape(
+        ComponentCreationHelper::createSphere(0.01, V3D(0, 0, 0), "1"));
     inst->add(det);
     inst->markAsDetector(det);
     ws2->getSpectrum(i).addDetectorID(static_cast<detid_t>(i));
@@ -1419,6 +1422,59 @@ public:
     TSM_ASSERT("Should not have any x resolution values", !ws.hasDx(3));
   }
 
+  void test_scanning() {
+    // Set up 2 workspaces to be merged
+    auto ws1 = makeWorkspaceWithDetectors(1, 1);
+    auto ws2 = makeWorkspaceWithDetectors(1, 1);
+    auto &detInfo1 = ws1->mutableDetectorInfo();
+    auto &detInfo2 = ws2->mutableDetectorInfo();
+    detInfo1.setPosition(0, {1, 0, 0});
+    detInfo2.setPosition(0, {2, 0, 0});
+    detInfo1.setScanInterval(0, {10, 20});
+    detInfo2.setScanInterval(0, {20, 30});
+
+    // Merge
+    auto merged = WorkspaceFactory::Instance().create(ws1, 2);
+    auto &detInfo = merged->mutableDetectorInfo();
+    detInfo.merge(detInfo2);
+
+    // Set up spectrum definitions with 1:1 mapping such that each spectrum
+    // corresponds to 1 time index of a detector.
+    auto specDefs = Kernel::make_cow<std::vector<SpectrumDefinition>>(2);
+    specDefs.access()[0].add(0, 0); // detector 0, time index 0
+    specDefs.access()[1].add(0, 1); // detector 0, time index 1
+    auto indexInfo = merged->indexInfo();
+    indexInfo.setDetectorIDs({0, 0}); // both spectra have detector ID 0
+    indexInfo.setSpectrumDefinitions(specDefs);
+    merged->setIndexInfo(indexInfo);
+
+    const auto &specInfo = merged->spectrumInfo();
+    TS_ASSERT(specInfo.hasDetectors(0));
+    TS_ASSERT(specInfo.hasDetectors(1));
+    TS_ASSERT_EQUALS(specInfo.position(0), V3D(1, 0, 0));
+    TS_ASSERT_EQUALS(specInfo.position(1), V3D(2, 0, 0));
+
+    TS_ASSERT_THROWS_NOTHING(specInfo.detector(0));
+    const auto &det = specInfo.detector(0);
+    // Failing legacy methods (use DetectorInfo/SpectrumInfo instead):
+    TS_ASSERT_THROWS(det.getPos(), std::runtime_error);
+    TS_ASSERT_THROWS(det.getRelativePos(), std::runtime_error);
+    TS_ASSERT_THROWS(det.getRotation(), std::runtime_error);
+    TS_ASSERT_THROWS(det.getRelativeRot(), std::runtime_error);
+    TS_ASSERT_THROWS(det.getPhi(), std::runtime_error);
+    // Failing methods, currently without replacement:
+    TS_ASSERT_THROWS(det.solidAngle(V3D(0, 0, 0)), std::runtime_error);
+    BoundingBox bb;
+    TS_ASSERT_THROWS(det.getBoundingBox(bb), std::runtime_error);
+    // Moving parent not possible since non-detector components do not have time
+    // indices and thus DetectorInfo cannot tell which set of detector positions
+    // to adjust.
+    TS_ASSERT_THROWS(detInfo.setPosition(*det.getParent(), V3D(1, 2, 3)),
+                     std::runtime_error);
+    TS_ASSERT_THROWS(detInfo.setRotation(*det.getParent(), Quat(1, 2, 3, 4)),
+                     std::runtime_error);
+  }
+
 private:
   Mantid::API::MantidImage_sptr createImage(const size_t width,
                                             const size_t height) {
diff --git a/Framework/API/test/MultiDomainFunctionTest.h b/Framework/API/test/MultiDomainFunctionTest.h
index 9990cdaec30357bb3e0425c611d73c20f9011f88..213ee8349fff64a0552e70a1eba1c01468a285d2 100644
--- a/Framework/API/test/MultiDomainFunctionTest.h
+++ b/Framework/API/test/MultiDomainFunctionTest.h
@@ -441,16 +441,33 @@ public:
   }
 
   void test_string_representation() {
-    const std::string expected =
-        "composite=MultiDomainFunction,NumDeriv=true;"
-        "name=MultiDomainFunctionTest_Function,A=0,B=1,$domains=i;"
-        "name=MultiDomainFunctionTest_Function,B=2,$domains=i;"
-        "name=MultiDomainFunctionTest_Function,B=3,$domains=i;ties=(f1.A="
-        "f0.A,f2.A=f0.A)";
+    const std::string expected = "composite=MultiDomainFunction,NumDeriv=true;"
+                                 "name=MultiDomainFunctionTest_Function,A=0,B="
+                                 "1,$domains=i;name=MultiDomainFunctionTest_"
+                                 "Function,A=0,B=2,$domains=i;name="
+                                 "MultiDomainFunctionTest_Function,A=0,B=3,$"
+                                 "domains=i;ties=(f1.A=f0.A,f2.A=f0.A)";
     TS_ASSERT_EQUALS(multi.asString(), expected);
     TS_ASSERT_EQUALS(multi.asString(), multi.clone()->asString());
   }
 
+  void test_equivalent_functions() {
+    std::string ini =
+        "composite=MultiDomainFunction;"
+        "name=MultiDomainFunctionTest_Function,A=1,B=2,$domains=i;"
+        "name=MultiDomainFunctionTest_Function,A=3,B=4,$domains=i;ties=(f1.A="
+        "f0.B)";
+    auto mfun = boost::dynamic_pointer_cast<CompositeFunction>(
+        FunctionFactory::Instance().createInitialized(ini));
+
+    auto eqFuns = mfun->createEquivalentFunctions();
+    TS_ASSERT_EQUALS(eqFuns.size(), 2);
+    TS_ASSERT_EQUALS(eqFuns[0]->asString(),
+                     "name=MultiDomainFunctionTest_Function,A=1,B=2");
+    TS_ASSERT_EQUALS(eqFuns[1]->asString(),
+                     "name=MultiDomainFunctionTest_Function,A=2,B=4");
+  }
+
 private:
   MultiDomainFunction multi;
   JointDomain domain;
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/FilterEvents.h b/Framework/Algorithms/inc/MantidAlgorithms/FilterEvents.h
index 7e9e2cc805c730e8f954c6b2f51c2532713d40a0..93e29516d3ac1c6db2f3be39092287f3436680aa 100644
--- a/Framework/Algorithms/inc/MantidAlgorithms/FilterEvents.h
+++ b/Framework/Algorithms/inc/MantidAlgorithms/FilterEvents.h
@@ -82,12 +82,20 @@ private:
   /// Process user input properties
   void processAlgorithmProperties();
 
+  /// process splitters given by a SplittersWorkspace
   void processSplittersWorkspace();
 
-  ///
+  void processTableSplittersWorkspace();
+
+  /// process splitters given by a MatrixWorkspace
   void processMatrixSplitterWorkspace();
 
   void createOutputWorkspaces();
+  /// create output workspaces in the case of using TableWorkspace for splitters
+  void createOutputWorkspacesTableSplitterCase();
+  /// create output workspaces in the case of using MatrixWorkspace for
+  /// splitters
+  void createOutputWorkspacesMatrixCase();
 
   /// Set up detector calibration parameters
   void setupDetectorTOFCalibration();
@@ -115,17 +123,24 @@ private:
   /// Examine workspace
   void examineEventWS();
 
+  /// Convert SplittersWorkspace to vector of time and vector of target
+  /// (itarget)
+  void convertSplittersWorkspaceToVectors();
+
   DataObjects::EventWorkspace_sptr m_eventWS;
   DataObjects::SplittersWorkspace_sptr m_splittersWorkspace;
+  DataObjects::TableWorkspace_sptr m_splitterTableWorkspace;
   API::MatrixWorkspace_const_sptr m_matrixSplitterWS;
   DataObjects::TableWorkspace_sptr m_detCorrectWorkspace;
 
   /// Flag to use matrix splitters or table splitters
-  bool m_useTableSplitters;
+  bool m_useSplittersWorkspace;
+  bool m_useArbTableSplitters;
 
-  std::set<int> m_workGroupIndexes;
+  std::set<int> m_targetWorkspaceIndexSet;
+  int m_maxTargetIndex;
   Kernel::TimeSplitterType m_splitters;
-  std::map<int, DataObjects::EventWorkspace_sptr> m_outputWS;
+  std::map<int, DataObjects::EventWorkspace_sptr> m_outputWorkspacesMap;
   std::vector<std::string> m_wsNames;
 
   std::vector<double> m_detTofOffsets;
@@ -138,20 +153,41 @@ private:
 
   double m_progress;
 
+  /// TODO: verify — returns the names of the time series (sample log) properties
   std::vector<std::string> getTimeSeriesLogNames();
 
   Kernel::TimeSplitterType generateSplitters(int wsindex);
 
+  void generateSplitterTSP(
+      std::vector<Kernel::TimeSeriesProperty<int> *> &split_tsp_vec);
+
+  void generateSplitterTSPalpha(
+      std::vector<Kernel::TimeSeriesProperty<int> *> &split_tsp_vec);
+
+  /// map splitters given as TimeSeriesProperty objects to output workspaces
+  void mapSplitterTSPtoWorkspaces(
+      const std::vector<Kernel::TimeSeriesProperty<int> *> &split_tsp_vec);
+
   void splitLog(DataObjects::EventWorkspace_sptr eventws, std::string logname,
                 Kernel::TimeSplitterType &splitters);
 
   /// Base of output workspace's name
   std::string m_outputWSNameBase;
 
+  /// TableWorkspace splitters: map from target name to workspace group index,
+  /// and the reverse; these two maps are complementary to each other
+  std::map<std::string, int> m_targetIndexMap;
+  std::map<int, std::string> m_wsGroupIndexTargetMap;
+
+  /// MatrixWorkspace splitters:
+  std::map<int, uint32_t> m_yIndexMap;
+  std::map<uint32_t, int> m_wsGroupdYMap;
+
   /// Flag to group workspace
   bool m_toGroupWS;
 
   /// Vector for splitting time
+  /// FIXME - consider converting this to DateAndTime; a speed test is needed first.
   std::vector<int64_t> m_vecSplitterTime;
   /// Vector for splitting grouip
   std::vector<int> m_vecSplitterGroup;
@@ -175,6 +211,8 @@ private:
   bool m_isSplittersRelativeTime;
   // Starting time for starting time of event filters
   Kernel::DateAndTime m_filterStartTime;
+  // EventWorkspace (aka. run)'s starting time
+  Kernel::DateAndTime m_runStartTime;
 };
 
 } // namespace Algorithms
diff --git a/Framework/Algorithms/src/ConvertToConstantL2.cpp b/Framework/Algorithms/src/ConvertToConstantL2.cpp
index 7f04f69704164ffcc92b315df5953ba17f946d58..8974a0974990115df00fb150ad09c68aed95dd8b 100644
--- a/Framework/Algorithms/src/ConvertToConstantL2.cpp
+++ b/Framework/Algorithms/src/ConvertToConstantL2.cpp
@@ -116,7 +116,7 @@ void ConvertToConstantL2::exec() {
     V3D newPos;
     newPos.spherical(m_l2, theta, phi);
 
-    const size_t detIndex = inputSpecInfo.spectrumDefinition(i)[0].first;
+    const auto detIndex = inputSpecInfo.spectrumDefinition(i)[0];
     outputDetInfo.setPosition(detIndex, newPos);
 
     m_outputWS->mutableX(i) -= deltaTOF;
diff --git a/Framework/Algorithms/src/FilterEvents.cpp b/Framework/Algorithms/src/FilterEvents.cpp
index 049be90c0c6efd8c7d5e89d6462c46a1bdd50a57..eb9cf26eecb0d3c5dffa01fa56f6c4dae04ec996 100644
--- a/Framework/Algorithms/src/FilterEvents.cpp
+++ b/Framework/Algorithms/src/FilterEvents.cpp
@@ -31,6 +31,13 @@ using namespace Mantid::Geometry;
 
 using namespace std;
 
+const int64_t TOLERANCE(1000000); // splitter time tolerance in nanoseconds;
+                                  // this value corresponds to a 10000 Hz resolution
+
+/// (integer) splitting target for undefined region, which will be recorded in
+/// m_splitterGroup
+const uint32_t UNDEFINED_SPLITTING_TARGET(0);
+
 namespace Mantid {
 namespace Algorithms {
 
@@ -39,15 +46,17 @@ DECLARE_ALGORITHM(FilterEvents)
 /** Constructor
  */
 FilterEvents::FilterEvents()
-    : m_eventWS(), m_splittersWorkspace(), m_matrixSplitterWS(),
-      m_detCorrectWorkspace(), m_useTableSplitters(false), m_workGroupIndexes(),
-      m_splitters(), m_outputWS(), m_wsNames(), m_detTofOffsets(),
-      m_detTofFactors(), m_FilterByPulseTime(false), m_informationWS(),
-      m_hasInfoWS(), m_progress(0.), m_outputWSNameBase(), m_toGroupWS(false),
+    : m_eventWS(), m_splittersWorkspace(), m_splitterTableWorkspace(),
+      m_matrixSplitterWS(), m_detCorrectWorkspace(),
+      m_useSplittersWorkspace(false), m_useArbTableSplitters(false),
+      m_targetWorkspaceIndexSet(), m_splitters(), m_outputWorkspacesMap(),
+      m_wsNames(), m_detTofOffsets(), m_detTofFactors(),
+      m_FilterByPulseTime(false), m_informationWS(), m_hasInfoWS(),
+      m_progress(0.), m_outputWSNameBase(), m_toGroupWS(false),
       m_vecSplitterTime(), m_vecSplitterGroup(), m_splitSampleLogs(false),
       m_useDBSpectrum(false), m_dbWSIndex(-1), m_tofCorrType(),
       m_specSkipType(), m_vecSkip(), m_isSplittersRelativeTime(false),
-      m_filterStartTime(0) {}
+      m_filterStartTime(0), m_runStartTime(0) {}
 
 /** Declare Inputs
  */
@@ -157,15 +166,22 @@ void FilterEvents::exec() {
   // Parse splitters
   m_progress = 0.0;
   progress(m_progress, "Processing SplittersWorkspace.");
-  if (m_useTableSplitters)
+  if (m_useSplittersWorkspace)
     processSplittersWorkspace();
+  else if (m_useArbTableSplitters)
+    processTableSplittersWorkspace();
   else
     processMatrixSplitterWorkspace();
 
   // Create output workspaces
   m_progress = 0.1;
   progress(m_progress, "Create Output Workspaces.");
-  createOutputWorkspaces();
+  if (m_useArbTableSplitters)
+    createOutputWorkspacesTableSplitterCase();
+  else if (m_useSplittersWorkspace)
+    createOutputWorkspaces();
+  else
+    createOutputWorkspacesMatrixCase();
 
   // Optionall import corrections
   m_progress = 0.20;
@@ -180,12 +196,21 @@ void FilterEvents::exec() {
     progressamount = 0.6;
   else
     progressamount = 0.7;
-  if (m_useTableSplitters)
+
+  std::vector<Kernel::TimeSeriesProperty<int> *> split_tsp_vector;
+  if (m_useSplittersWorkspace) {
     filterEventsBySplitters(progressamount);
-  else
+    generateSplitterTSPalpha(split_tsp_vector);
+  } else {
     filterEventsByVectorSplitters(progressamount);
+    generateSplitterTSP(split_tsp_vector);
+  }
+
+  // TODO:FIXME - assign split_tsp_vector to all the output workspaces!
+  mapSplitterTSPtoWorkspaces(split_tsp_vector);
 
   // Optional to group detector
+  // TODO:FIXME - move this part to a method
   if (m_toGroupWS) {
     m_progress = 0.9;
     progress(m_progress, "Group workspaces");
@@ -203,10 +228,15 @@ void FilterEvents::exec() {
     }
   }
 
+  // TODO:FIXME - move this part to a method
   // Form the names of output workspaces
   std::vector<std::string> outputwsnames;
   std::map<int, DataObjects::EventWorkspace_sptr>::iterator miter;
-  for (miter = m_outputWS.begin(); miter != m_outputWS.end(); ++miter) {
+  // for (miter = m_outputWorkspacesMap.begin();
+  //     miter != m_outputWorkspacesMap.end(); ++miter) {
+  //  outputwsnames.push_back(miter->second->name());
+  for (miter = m_outputWorkspacesMap.begin();
+       miter != m_outputWorkspacesMap.end(); ++miter) {
     outputwsnames.push_back(miter->second->getName());
   }
   setProperty("OutputWorkspaceNames", outputwsnames);
@@ -215,6 +245,7 @@ void FilterEvents::exec() {
   progress(m_progress, "Completed");
 }
 
+//----------------------------------------------------------------------------------------------
 /** Process input properties
  */
 void FilterEvents::processAlgorithmProperties() {
@@ -231,12 +262,16 @@ void FilterEvents::processAlgorithmProperties() {
 
   m_splittersWorkspace =
       boost::dynamic_pointer_cast<SplittersWorkspace>(tempws);
+  m_splitterTableWorkspace =
+      boost::dynamic_pointer_cast<TableWorkspace>(tempws);
   if (m_splittersWorkspace) {
-    m_useTableSplitters = true;
-  } else {
+    m_useSplittersWorkspace = true;
+  } else if (m_splitterTableWorkspace)
+    m_useArbTableSplitters = true;
+  else {
     m_matrixSplitterWS = boost::dynamic_pointer_cast<MatrixWorkspace>(tempws);
     if (m_matrixSplitterWS) {
-      m_useTableSplitters = false;
+      m_useSplittersWorkspace = false;
     } else {
       throw runtime_error("Invalid type of input workspace, neither "
                           "SplittersWorkspace nor MatrixWorkspace.");
@@ -301,6 +336,13 @@ void FilterEvents::processAlgorithmProperties() {
   else
     m_useDBSpectrum = true;
 
+  // Get run start time
+  if (m_eventWS->run().hasProperty("run_start")) {
+    Kernel::DateAndTime run_start_time(
+        m_eventWS->run().getProperty("run_start")->value());
+    m_runStartTime = run_start_time;
+  }
+
   // Splitters are given relative time
   m_isSplittersRelativeTime = getProperty("RelativeTime");
   if (m_isSplittersRelativeTime) {
@@ -313,9 +355,7 @@ void FilterEvents::processAlgorithmProperties() {
     } else {
       // Retrieve filter starting time from property run_start as default
       if (m_eventWS->run().hasProperty("run_start")) {
-        Kernel::DateAndTime temp_shift_time(
-            m_eventWS->run().getProperty("run_start")->value());
-        m_filterStartTime = temp_shift_time;
+        m_filterStartTime = m_runStartTime;
       } else {
         throw std::runtime_error(
             "Input event workspace does not have property run_start. "
@@ -323,11 +363,14 @@ void FilterEvents::processAlgorithmProperties() {
             "Splitters cannot be in reltive time.");
       }
     }
-  }
+  } // END-IF: m_isSplittersRelativeTime
 }
 
-/** Examine whether any spectrum does not have detector
-  */
+//----------------------------------------------------------------------------------------------
+/**  Examine whether any spectrum does not have detector
+ * Warning message will be written out
+ * @brief FilterEvents::examineEventWS
+ */
 void FilterEvents::examineEventWS() {
   size_t numhist = m_eventWS->getNumberHistograms();
   m_vecSkip.resize(numhist, false);
@@ -335,9 +378,11 @@ void FilterEvents::examineEventWS() {
   if (m_specSkipType == EventFilterSkipNoDetTOFCorr &&
       m_tofCorrType == NoneCorrect) {
     // No TOF correction and skip spectrum only if TOF correction is required
-    g_log.notice("By user's choice, No spectrum will be skipped even if it has "
-                 "no detector.");
+    g_log.warning(
+        "By user's choice, No spectrum will be skipped even if it has "
+        "no detector.");
   } else {
+    // check detectors whether there is any of them that will be skipped
     stringstream msgss;
     size_t numskipspec = 0;
     size_t numeventsskip = 0;
@@ -356,7 +401,6 @@ void FilterEvents::examineEventWS() {
         else
           msgss << ",";
       }
-
     } // ENDFOR
 
     if (numskipspec > 0) {
@@ -372,13 +416,17 @@ void FilterEvents::examineEventWS() {
     }
 
   } // END-IF-ELSE
+
+  return;
 }
 
+//----------------------------------------------------------------------------------------------
 /** Purpose:
  *    Convert SplitterWorkspace object to TimeSplitterType (sorted vector)
  *    and create a map for all workspace group number
  *  Requirements:
- *  Gaurantees
+ *  Guarantees:
+ *  - Update of m_maxTargetIndex: it can be zero in SplittersWorkspace case
  * @brief FilterEvents::processSplittersWorkspace
  */
 void FilterEvents::processSplittersWorkspace() {
@@ -387,10 +435,17 @@ void FilterEvents::processSplittersWorkspace() {
   m_splitters.reserve(numsplitters);
 
   // 2. Insert all splitters
+  m_maxTargetIndex = 0;
   bool inorder = true;
   for (size_t i = 0; i < numsplitters; i++) {
+    // push back the splitter in SplittersWorkspace to list of splitters
     m_splitters.push_back(m_splittersWorkspace->getSplitter(i));
-    m_workGroupIndexes.insert(m_splitters.back().index());
+    // add the target workspace index to target workspace indexes set
+    m_targetWorkspaceIndexSet.insert(m_splitters.back().index());
+    // register for the maximum target index
+    if (m_splitters.back().index() > m_maxTargetIndex)
+      m_maxTargetIndex = m_splitters.back().index();
+    // check whether the splitters are in time order
     if (inorder && i > 0 && m_splitters[i] < m_splitters[i - 1])
       inorder = false;
   }
@@ -403,13 +458,13 @@ void FilterEvents::processSplittersWorkspace() {
   }
 
   // 4. Add extra workgroup index for unfiltered events
-  m_workGroupIndexes.insert(-1);
+  m_targetWorkspaceIndexSet.insert(-1);
 
   // 5. Add information
   if (m_hasInfoWS) {
-    if (m_workGroupIndexes.size() > m_informationWS->rowCount() + 1) {
+    if (m_targetWorkspaceIndexSet.size() > m_informationWS->rowCount() + 1) {
       g_log.warning() << "Input Splitters Workspace has different entries ("
-                      << m_workGroupIndexes.size() - 1
+                      << m_targetWorkspaceIndexSet.size() - 1
                       << ") than input information workspaces ("
                       << m_informationWS->rowCount() << "). "
                       << "  Information may not be accurate. \n";
@@ -417,16 +472,214 @@ void FilterEvents::processSplittersWorkspace() {
   }
 }
 
+//----------------------------------------------------------------------------------------------
+/** Convert SplittersWorkspace to vector of time and vector of target (itarget)
+ * NOTE: This is designed to use a single vector/vector representation of
+ * splitters for all types of inputs.
+ *       It should not be used until extensive speed-comparison tests are done!
+ * @brief FilterEvents::convertSplittersWorkspaceToVectors
+ */
+void FilterEvents::convertSplittersWorkspaceToVectors() {
+  // check: only applied for splitters given by SplittersWorkspace
+  assert(m_useSplittersWorkspace);
+
+  // clear and get ready
+  m_vecSplitterGroup.clear();
+  m_vecSplitterTime.clear();
+
+  // convert SplittersWorkspace to a set of pairs which can be sorted
+  size_t num_rows = this->m_splittersWorkspace->rowCount();
+  for (size_t irow = 0; irow < num_rows; ++irow) {
+    Kernel::SplittingInterval splitter =
+        m_splittersWorkspace->getSplitter(irow);
+    if (m_vecSplitterTime.size() == 0 ||
+        splitter.start() > m_vecSplitterTime.back() + TOLERANCE) {
+      m_vecSplitterTime.push_back(splitter.start().totalNanoseconds());
+      m_vecSplitterTime.push_back(splitter.stop().totalNanoseconds());
+      // 0 stands for not defined
+      m_vecSplitterGroup.push_back(0);
+      m_vecSplitterGroup.push_back(splitter.index());
+    } else if (splitter.start() < m_vecSplitterTime.back() - TOLERANCE) {
+      // almost the same: then add only the splitter's stop() time
+      m_vecSplitterTime.push_back(splitter.stop().totalNanoseconds());
+      m_vecSplitterGroup.push_back(splitter.index());
+    } else {
+      // have to insert it somewhere in the middle
+      std::vector<int64_t>::iterator finditer =
+          std::lower_bound(m_vecSplitterTime.begin(), m_vecSplitterTime.end(),
+                           splitter.start().totalNanoseconds());
+      // get the index
+      size_t split_index =
+          static_cast<size_t>(finditer - m_vecSplitterTime.begin());
+      if (*finditer - splitter.start().totalNanoseconds() > TOLERANCE) {
+        // the start time is before one splitter indicated by *finditer: insert
+        // both
+        // check
+        if (m_vecSplitterGroup[split_index] != UNDEFINED_SPLITTING_TARGET) {
+          std::stringstream errss;
+          errss << "Tried to insert splitter [" << splitter.start() << ", "
+                << splitter.stop() << "] but there is "
+                << "already a time entry with target "
+                << m_vecSplitterGroup[split_index] << " inside it.";
+          throw std::runtime_error(errss.str());
+        }
+        // insert the full set
+        m_vecSplitterTime.insert(finditer, splitter.stop().totalNanoseconds());
+        m_vecSplitterTime.insert(finditer, splitter.start().totalNanoseconds());
+        // insert the target
+        m_vecSplitterGroup.insert(m_vecSplitterGroup.begin() + split_index,
+                                  static_cast<int>(UNDEFINED_SPLITTING_TARGET));
+        m_vecSplitterGroup.insert(m_vecSplitterGroup.begin() + split_index,
+                                  static_cast<int>(splitter.index()));
+      } else if (*finditer - splitter.start().totalNanoseconds() > -TOLERANCE) {
+        // the start time is an existing entry
+        // check
+        if (m_vecSplitterGroup[split_index] != UNDEFINED_SPLITTING_TARGET) {
+          std::stringstream errss;
+          errss << "Tried to insert splitter [" << splitter.start() << ", "
+                << splitter.stop() << "] but there is "
+                << "already a time entry with target "
+                << m_vecSplitterGroup[split_index] << " inside it.";
+          throw std::runtime_error(errss.str());
+        }
+        // insert the stop time
+        m_vecSplitterTime.insert(finditer + 1,
+                                 splitter.stop().totalNanoseconds());
+        // insert the target
+        m_vecSplitterGroup.insert(m_vecSplitterGroup.begin() + split_index + 1,
+                                  splitter.index());
+      } else {
+        throw std::runtime_error("This is not a possible situation!");
+      }
+    } // IF-ELSE to add a new entry
+  }   // END-FOR (add all splitters)
+
+  return;
+}
+
+//----------------------------------------------------------------------------------------------
+/** process the input splitters given by a TableWorkspace
+ * The method will transfer the start/stop time to "m_vecSplitterTime"
+ * and map the splitting target (in string) to "m_vecSplitterGroup".
+ * The mapping will be recorded in "m_targetIndexMap" and
+ *"m_wsGroupIndexTargetMap".
+ * Also, "m_maxTargetIndex" is set up to record the highest target group/index,
+ * i.e., max value of m_vecSplitterGroup
+ *
+ * @brief FilterEvents::processTableSplittersWorkspace
+ */
+void FilterEvents::processTableSplittersWorkspace() {
+  // check input workspace's validity
+  assert(m_splitterTableWorkspace);
+  if (m_splitterTableWorkspace->columnCount() != 3) {
+    throw std::runtime_error(
+        "Splitters given in TableWorkspace must have 3 columns.");
+  }
+
+  // clear vector splitterTime and vector of splitter group
+  m_vecSplitterTime.clear();
+  m_vecSplitterGroup.clear();
+  bool found_undefined_splitter = false;
+
+  // get the run start time
+  int64_t filter_shift_time = m_runStartTime.totalNanoseconds();
+
+  int max_target_index = 1;
+
+  // convert TableWorkspace's values to vectors
+  size_t num_rows = m_splitterTableWorkspace->rowCount();
+  for (size_t irow = 0; irow < num_rows; ++irow) {
+    // get start and stop time
+    double start_time = m_splitterTableWorkspace->cell_cast<double>(irow, 0);
+    double stop_time = m_splitterTableWorkspace->cell_cast<double>(irow, 1);
+    std::string target = m_splitterTableWorkspace->cell<std::string>(irow, 2);
+
+    int64_t start_64 =
+        filter_shift_time + static_cast<int64_t>(start_time * 1.E9);
+    int64_t stop_64 =
+        filter_shift_time + static_cast<int64_t>(stop_time * 1.E9);
+
+    if (m_vecSplitterTime.size() == 0) {
+      // first splitter: push the start time to vector
+      m_vecSplitterTime.push_back(start_64);
+    } else if (start_64 - m_vecSplitterTime.back() > TOLERANCE) {
+      // the start time is way behind previous splitter's stop time
+      // create a new splitter and assign the time interval in the middle to
+      // the undefined target (index 0)
+      m_vecSplitterTime.push_back(start_64);
+      // NOTE: use index = 0 for un-defined slot
+      m_vecSplitterGroup.push_back(UNDEFINED_SPLITTING_TARGET);
+      found_undefined_splitter = true;
+    } else if (abs(start_64 - m_vecSplitterTime.back()) < TOLERANCE) {
+      // new splitter's start time is same (within tolerance) as the stop time
+      // of the previous
+      ;
+    } else {
+      // new splitter's start time is before the stop time of the last splitter.
+      throw std::runtime_error("Input table workspace does not have splitters "
+                               "set up in order, which is a requirement.");
+    }
+
+    // convert string-target to integer target
+    bool addnew = false;
+    int int_target(-1);
+    if (m_targetIndexMap.size() == 0) {
+      addnew = true;
+    } else {
+      std::map<std::string, int>::iterator mapiter =
+          m_targetIndexMap.find(target);
+      if (mapiter == m_targetIndexMap.end())
+        addnew = true;
+      else
+        int_target = mapiter->second;
+    }
+
+    // add a new ordered-integer-target
+    if (addnew) {
+      // target is not in map
+      int_target = max_target_index;
+      m_targetIndexMap.insert(std::pair<std::string, int>(target, int_target));
+      m_wsGroupIndexTargetMap.emplace(int_target, target);
+      this->m_targetWorkspaceIndexSet.insert(int_target);
+      max_target_index++;
+    }
+
+    // add the stop time and the integer target
+    m_vecSplitterTime.push_back(stop_64);
+    m_vecSplitterGroup.push_back(int_target);
+  } // END-FOR (irow)
+
+  // record max target index
+  m_maxTargetIndex = max_target_index - 1;
+
+  // add un-defined splitter to map
+  if (found_undefined_splitter) {
+    m_targetIndexMap.emplace("undefined", 0);
+    m_wsGroupIndexTargetMap.emplace(0, "undefined");
+    m_targetWorkspaceIndexSet.insert(0);
+  }
+
+  return;
+}
+
+//----------------------------------------------------------------------------------------------
 /**
  * @brief FilterEvents::processMatrixSplitterWorkspace
  * Purpose:
- *   Convert the splitters in matrix workspace to a vector of splitters
+ *   Convert the splitters in MatrixWorkspace to m_vecSplitterTime and
+ * m_vecSplitterGroup
  * Requirements:
  *   m_matrixSplitterWS has valid value
  *   vecX's size must be one larger than and that of vecY of m_matrixSplitterWS
  * Guarantees
- *   Splitters stored in m_matrixSpliterWS are transformed to
- *   m_vecSplitterTime and m_workGroupIndexes, which are of same size
+ *  - Splitters stored in m_matrixSplitterWS are transformed to
+ *    "m_vecSplitterTime" and "m_vecSplitterGroup", whose sizes differ by 1.
+ *  - Y values are mapped to integer group index stored in "m_vecSplitterGroup".
+ *    The mapping is recorded in "m_yIndexMap" and "m_wsGroupdYMap"
+ *    "m_maxTargetIndex" is used to register the maximum group index
+ *    Negative Y is defined as "undefined"
+ * Note: there is NO undefined split region here; instead, any NEGATIVE Y
+ * value is treated as an "undefined splitter"
  */
 void FilterEvents::processMatrixSplitterWorkspace() {
   // Check input workspace validity
@@ -452,13 +705,49 @@ void FilterEvents::processMatrixSplitterWorkspace() {
       m_vecSplitterTime[i] += time_shift_ns;
   }
 
+  // process the group
+  uint32_t max_target_index = 1;
+
   for (size_t i = 0; i < sizey; ++i) {
-    m_vecSplitterGroup[i] = static_cast<int>(Y[i]);
-    m_workGroupIndexes.insert(m_vecSplitterGroup[i]);
+
+    int y_index = static_cast<int>(Y[i]);
+
+    // try to find Y[i] in m_yIndexMap
+    std::map<int, uint32_t>::iterator mapiter = m_yIndexMap.find(y_index);
+    if (mapiter == m_yIndexMap.end()) {
+      // new
+      // default to 0 as undefined slot.
+      uint32_t int_target = UNDEFINED_SPLITTING_TARGET;
+      //  if well-defined, then use the current
+      if (y_index >= 0) {
+        int_target = max_target_index;
+        ++max_target_index;
+      }
+
+      // un-defined or un-filtered
+      m_vecSplitterGroup[i] = int_target;
+
+      // add to maps and etc.
+      m_yIndexMap.emplace(y_index, int_target);
+      m_wsGroupdYMap.emplace(int_target, y_index);
+      m_targetWorkspaceIndexSet.insert(int_target);
+    } else {
+      // this target Y-index has been registered previously
+      uint32_t target_index = mapiter->second;
+      m_vecSplitterGroup[i] = target_index;
+    }
   }
+
+  // register the max target integer
+  m_maxTargetIndex = max_target_index - 1;
+
+  return;
 }
 
-/** Create a list of EventWorkspace for output
+//----------------------------------------------------------------------------------------------
+/** Create a list of EventWorkspace for output in the case that splitters are
+ * given by
+ *  SplittersWorkspace
  */
 void FilterEvents::createOutputWorkspaces() {
 
@@ -473,7 +762,7 @@ void FilterEvents::createOutputWorkspaces() {
 
   // Determine the minimum group index number
   int minwsgroup = INT_MAX;
-  for (auto wsgroup : m_workGroupIndexes) {
+  for (auto wsgroup : m_targetWorkspaceIndexSet) {
     if (wsgroup < minwsgroup && wsgroup >= 0)
       minwsgroup = wsgroup;
   }
@@ -487,10 +776,10 @@ void FilterEvents::createOutputWorkspaces() {
 
   // Set up new workspaces
   int numoutputws = 0;
-  double numnewws = static_cast<double>(m_workGroupIndexes.size());
+  double numnewws = static_cast<double>(m_targetWorkspaceIndexSet.size());
   double wsgindex = 0.;
 
-  for (auto const wsgroup : m_workGroupIndexes) {
+  for (auto const wsgroup : m_targetWorkspaceIndexSet) {
     // Generate new workspace name
     bool add2output = true;
     std::stringstream wsname;
@@ -504,7 +793,7 @@ void FilterEvents::createOutputWorkspaces() {
 
     boost::shared_ptr<EventWorkspace> optws =
         create<DataObjects::EventWorkspace>(*m_eventWS);
-    m_outputWS.emplace(wsgroup, optws);
+    m_outputWorkspacesMap.emplace(wsgroup, optws);
 
     // Add information, including title and comment, to output workspace
     if (m_hasInfoWS) {
@@ -522,7 +811,7 @@ void FilterEvents::createOutputWorkspaces() {
       }
       optws->setComment(info);
       optws->setTitle(info);
-    }
+    } // END-IF infor WS
 
     // Add to output properties.  There shouldn't be any workspace
     // (non-unfiltered) skipped from group index
@@ -569,6 +858,171 @@ void FilterEvents::createOutputWorkspaces() {
   g_log.information("Output workspaces are created. ");
 }
 
+//----------------------------------------------------------------------------------------------
+/** Create output EventWorkspaces in the case that the splitters are given by
+ * MatrixWorkspace
+ * Here is the list of class variables that will be updated:
+ * - m_outputWorkspacesMap: use (integer) group index to find output
+ * EventWorkspace
+ * - m_wsNames: vector of output workspaces
+ * @brief FilterEvents::createOutputWorkspacesMatrixCase
+ */
+void FilterEvents::createOutputWorkspacesMatrixCase() {
+  // check condition
+  if (!m_matrixSplitterWS) {
+    g_log.error("createOutputWorkspacesMatrixCase() is applied to "
+                "MatrixWorkspace splitters only!");
+    throw std::runtime_error("Wrong call!");
+  }
+
+  // set up new workspaces
+  // Note: m_targetWorkspaceIndexSet is used in different manner among
+  // SplittersWorkspace, MatrixWorkspace and TableWorkspace cases
+  size_t numoutputws = m_targetWorkspaceIndexSet.size();
+  size_t wsgindex = 0;
+
+  for (auto const wsgroup : m_targetWorkspaceIndexSet) {
+    if (wsgroup < 0)
+      throw std::runtime_error("It is not possible to have split-target group "
+                               "index < 0 in MatrixWorkspace case.");
+
+    // workspace name
+    std::stringstream wsname;
+    if (wsgroup > 0) {
+      //  std::string target_name = m_wsGroupIndexTargetMap[wsgroup];
+      int target_name = m_wsGroupdYMap[wsgroup];
+      wsname << m_outputWSNameBase << "_" << target_name;
+    } else {
+      wsname << m_outputWSNameBase << "_unfiltered";
+    }
+
+    // create new workspace from input EventWorkspace and all the sample logs
+    // are copied to the new one
+    boost::shared_ptr<EventWorkspace> optws =
+        create<DataObjects::EventWorkspace>(*m_eventWS);
+    m_outputWorkspacesMap.emplace(wsgroup, optws);
+
+    // add to output workspace property
+    std::stringstream propertynamess;
+    if (wsgroup == 0) {
+      propertynamess << "OutputWorkspace_unfiltered";
+    } else {
+      propertynamess << "OutputWorkspace_" << wsgroup;
+    }
+
+    // Inserted this pair to map
+    m_wsNames.push_back(wsname.str());
+
+    // Set (property) to output workspace and set to ADS
+    declareProperty(Kernel::make_unique<
+                        API::WorkspaceProperty<DataObjects::EventWorkspace>>(
+                        propertynamess.str(), wsname.str(), Direction::Output),
+                    "Output");
+    setProperty(propertynamess.str(), optws);
+    AnalysisDataService::Instance().addOrReplace(wsname.str(), optws);
+
+    g_log.debug() << "Created output Workspace of group = " << wsgroup
+                  << "  Property Name = " << propertynamess.str()
+                  << " Workspace name = " << wsname.str()
+                  << " with Number of events = " << optws->getNumberEvents()
+                  << "\n";
+
+    // Update progress report
+    m_progress =
+        0.1 +
+        0.1 * static_cast<double>(wsgindex) / static_cast<double>(numoutputws);
+    progress(m_progress, "Creating output workspace");
+    wsgindex += 1;
+  } // END-FOR (wsgroup)
+
+  // Set output and do debug report
+  g_log.debug() << "Output workspace number: " << numoutputws << "\n";
+  setProperty("NumberOutputWS", static_cast<int>(numoutputws));
+
+  return;
+}
+
+//----------------------------------------------------------------------------------------------
+/** Create output EventWorkspaces in the case that the splitters are given by
+ * TableWorkspace
+ * Here is the list of class variables that will be updated:
+ * - m_outputWorkspacesMap: use (integer) group index to find output
+ * EventWorkspace
+ * - m_wsNames: vector of output workspaces
+ * @brief FilterEvents::createOutputWorkspacesTableSplitterCase
+ */
+void FilterEvents::createOutputWorkspacesTableSplitterCase() {
+  // check condition
+  if (!m_useArbTableSplitters) {
+    g_log.error("createOutputWorkspacesTableSplitterCase() is applied to "
+                "TableWorkspace splitters only!");
+    throw std::runtime_error("Wrong call!");
+  }
+
+  // set up new workspaces
+  size_t numoutputws = m_targetWorkspaceIndexSet.size();
+  size_t wsgindex = 0;
+
+  for (auto const wsgroup : m_targetWorkspaceIndexSet) {
+    if (wsgroup < 0)
+      throw std::runtime_error("It is not possible to have split-target group "
+                               "index < 0 in TableWorkspace case.");
+
+    // workspace name
+    std::stringstream wsname;
+    if (wsgroup > 0) {
+      // get target name via map
+      std::string target_name = m_wsGroupIndexTargetMap[wsgroup];
+      wsname << m_outputWSNameBase << "_" << target_name;
+    } else {
+      wsname << m_outputWSNameBase << "_unfiltered";
+    }
+
+    // create new workspace
+    boost::shared_ptr<EventWorkspace> optws =
+        create<DataObjects::EventWorkspace>(*m_eventWS);
+    m_outputWorkspacesMap.emplace(wsgroup, optws);
+
+    // add to output workspace property
+    std::stringstream propertynamess;
+    if (wsgroup < 0) {
+      propertynamess << "OutputWorkspace_unfiltered";
+    } else {
+      propertynamess << "OutputWorkspace_" << wsgroup;
+    }
+
+    // Inserted this pair to map
+    m_wsNames.push_back(wsname.str());
+
+    // Set (property) to output workspace and set to ADS
+    declareProperty(Kernel::make_unique<
+                        API::WorkspaceProperty<DataObjects::EventWorkspace>>(
+                        propertynamess.str(), wsname.str(), Direction::Output),
+                    "Output");
+    setProperty(propertynamess.str(), optws);
+    AnalysisDataService::Instance().addOrReplace(wsname.str(), optws);
+
+    g_log.debug() << "Created output Workspace of group = " << wsgroup
+                  << "  Property Name = " << propertynamess.str()
+                  << " Workspace name = " << wsname.str()
+                  << " with Number of events = " << optws->getNumberEvents()
+                  << "\n";
+
+    // Update progress report
+    m_progress =
+        0.1 +
+        0.1 * static_cast<double>(wsgindex) / static_cast<double>(numoutputws);
+    progress(m_progress, "Creating output workspace");
+    wsgindex += 1;
+  } // END-FOR (wsgroup)
+
+  // Set output and do debug report
+  g_log.debug() << "Output workspace number: " << numoutputws << "\n";
+  setProperty("NumberOutputWS", static_cast<int>(numoutputws));
+
+  return;
+}
+
 /** Set up neutron event's TOF correction.
   * It can be (1) parsed from TOF-correction table workspace to vectors,
   * (2) created according to detector's position in instrument;
@@ -813,7 +1267,7 @@ void FilterEvents::filterEventsBySplitters(double progressamount) {
       // Get the output event lists (should be empty) to be a map
       std::map<int, DataObjects::EventList *> outputs;
       PARALLEL_CRITICAL(build_elist) {
-        for (auto &ws : m_outputWS) {
+        for (auto &ws : m_outputWorkspacesMap) {
           int index = ws.first;
           auto &output_el = ws.second->getSpectrum(iws);
           outputs.emplace(index, &output_el);
@@ -850,12 +1304,14 @@ void FilterEvents::filterEventsBySplitters(double progressamount) {
 
   auto lognames = this->getTimeSeriesLogNames();
   g_log.debug() << "[FilterEvents D1214]:  Number of TimeSeries Logs = "
-                << lognames.size() << " to " << m_outputWS.size()
+                << lognames.size() << " to " << m_outputWorkspacesMap.size()
                 << " outptu workspaces. \n";
 
-  double numws = static_cast<double>(m_outputWS.size());
+  double numws = static_cast<double>(m_outputWorkspacesMap.size());
   double outwsindex = 0.;
-  for (auto &ws : m_outputWS) {
+
+  // split sample logs from original workspace to new one
+  for (auto &ws : m_outputWorkspacesMap) {
     int wsindex = ws.first;
     DataObjects::EventWorkspace_sptr opws = ws.second;
 
@@ -876,6 +1332,7 @@ void FilterEvents::filterEventsBySplitters(double progressamount) {
     }
 
     // Split log
+    // FIXME-TODO: SHALL WE MOVE THIS PART OUTSIDE OF THIS METHOD?
     size_t numlogs = lognames.size();
     for (size_t ilog = 0; ilog < numlogs; ++ilog) {
       this->splitLog(opws, lognames[ilog], splitters);
@@ -895,8 +1352,13 @@ void FilterEvents::filterEventsByVectorSplitters(double progressamount) {
 
   // Loop over the histograms (detector spectra) to do split from 1 event list
   // to N event list
-  g_log.debug() << "Number of spectra in input/source EventWorkspace = "
-                << numberOfSpectra << ".\n";
+  g_log.notice() << "Filter by vector splitters: Number of spectra in "
+                    "input/source EventWorkspace = " << numberOfSpectra
+                 << ".\n";
+
+  for (size_t i = 0; i < m_vecSplitterGroup.size(); ++i)
+    std::cout << "splitter " << i << ": " << m_vecSplitterTime[i] << ", "
+              << m_vecSplitterGroup[i] << "\n";
 
   PARALLEL_FOR_NO_WSP_CHECK()
   for (int64_t iws = 0; iws < int64_t(numberOfSpectra); ++iws) {
@@ -907,7 +1369,7 @@ void FilterEvents::filterEventsByVectorSplitters(double progressamount) {
       // Get the output event lists (should be empty) to be a map
       map<int, DataObjects::EventList *> outputs;
       PARALLEL_CRITICAL(build_elist) {
-        for (auto &ws : m_outputWS) {
+        for (auto &ws : m_outputWorkspacesMap) {
           int index = ws.first;
           auto &output_el = ws.second->getSpectrum(iws);
           outputs.emplace(index, &output_el);
@@ -950,6 +1412,111 @@ void FilterEvents::filterEventsByVectorSplitters(double progressamount) {
 
   g_log.notice("Splitters in format of Matrixworkspace are not recommended to "
                "split sample logs. ");
+
+  // split sample logs
+
+  // find the maximum target index over all output workspaces
+  int max_target_index = 0;
+  for (auto target_iter = m_targetWorkspaceIndexSet.begin();
+       target_iter != m_targetWorkspaceIndexSet.end(); ++target_iter) {
+    if (*target_iter > max_target_index)
+      max_target_index = *target_iter;
+  }
+
+  // convert the splitter time vector (int64 nanoseconds) to DateAndTime
+  std::vector<Kernel::DateAndTime> split_datetime_vec(m_vecSplitterTime.size());
+  for (size_t i = 0; i < m_vecSplitterTime.size(); ++i) {
+    DateAndTime split_time(m_vecSplitterTime[i]);
+    split_datetime_vec[i] = split_time;
+  }
+
+  // split each double/integer time series property of the input workspace and
+  // attach the pieces to the matching output workspaces
+  for (auto property : m_eventWS->run().getProperties()) {
+    g_log.debug() << "Process sample log " << property->name() << "\n";
+    TimeSeriesProperty<double> *dbl_prop =
+        dynamic_cast<TimeSeriesProperty<double> *>(property);
+    TimeSeriesProperty<int> *int_prop =
+        dynamic_cast<TimeSeriesProperty<int> *>(property);
+    if (dbl_prop) {
+      // create one empty output property per target (index 0..max), even for
+      // targets that end up empty, so indexing by target is direct
+      std::vector<TimeSeriesProperty<double> *> output_vector;
+      for (int tindex = 0; tindex <= max_target_index; ++tindex) {
+        TimeSeriesProperty<double> *new_property =
+            new TimeSeriesProperty<double>(dbl_prop->name());
+        output_vector.push_back(new_property);
+      }
+
+      // split
+      dbl_prop->splitByTimeVector(split_datetime_vec, m_vecSplitterGroup,
+                                  output_vector);
+
+      // set to output workspace
+      for (int tindex = 0; tindex <= max_target_index; ++tindex) {
+        // find output workspace
+        std::map<int, DataObjects::EventWorkspace_sptr>::iterator wsiter;
+        wsiter = m_outputWorkspacesMap.find(tindex);
+        if (wsiter == m_outputWorkspacesMap.end()) {
+          g_log.error() << "Workspace target (" << tindex
+                        << ") does not have workspace associated."
+                        << "\n";
+          // nothing takes ownership of the split piece: free it here
+          delete output_vector[tindex];
+        } else {
+          DataObjects::EventWorkspace_sptr ws_i = wsiter->second;
+          // 'true' transfers ownership of the property to the run object
+          ws_i->mutableRun().addProperty(output_vector[tindex], true);
+        }
+      }
+
+    } else if (int_prop) {
+      // integer log: same procedure as the double branch above
+      std::vector<TimeSeriesProperty<int> *> output_vector;
+      for (int tindex = 0; tindex <= max_target_index; ++tindex) {
+        TimeSeriesProperty<int> *new_property =
+            new TimeSeriesProperty<int>(int_prop->name());
+        output_vector.push_back(new_property);
+      }
+
+      // split
+      int_prop->splitByTimeVector(split_datetime_vec, m_vecSplitterGroup,
+                                  output_vector);
+
+      // set to output workspace
+      for (int tindex = 0; tindex <= max_target_index; ++tindex) {
+        // find output workspace
+        std::map<int, DataObjects::EventWorkspace_sptr>::iterator wsiter;
+        wsiter = m_outputWorkspacesMap.find(tindex);
+        if (wsiter == m_outputWorkspacesMap.end()) {
+          g_log.error() << "Workspace target (" << tindex
+                        << ") does not have workspace associated."
+                        << "\n";
+          // nothing takes ownership of the split piece: free it here
+          delete output_vector[tindex];
+        } else {
+          DataObjects::EventWorkspace_sptr ws_i = wsiter->second;
+          // 'true' transfers ownership of the property to the run object
+          ws_i->mutableRun().addProperty(output_vector[tindex], true);
+        }
+      }
+    } else {
+      // Non-time-series properties are presumably copied when the output
+      // workspaces are created, so nothing is done here.
+      // TODO: confirm that all non-TSP properties are indeed carried over.
+    }
+  }
+
+  // re-integrate the proton charge for every output workspace.
+  // (The previous code nested two identical loops over the same 'tindex'
+  // here, re-running the integration (max_target_index+1) times per
+  // workspace; one pass is sufficient.)
+  for (int tindex = 0; tindex <= max_target_index; ++tindex) {
+    // find output workspace
+    std::map<int, DataObjects::EventWorkspace_sptr>::iterator wsiter =
+        m_outputWorkspacesMap.find(tindex);
+    if (wsiter == m_outputWorkspacesMap.end()) {
+      g_log.error() << "Workspace target (" << tindex
+                    << ") does not have workspace associated."
+                    << "\n";
+    } else {
+      DataObjects::EventWorkspace_sptr ws_i = wsiter->second;
+      ws_i->mutableRun().integrateProtonCharge();
+    }
+  }
+
+  return;
 }
 
 /** Generate splitters for specified workspace index as a subset of
@@ -1000,6 +1567,141 @@ void FilterEvents::splitLog(EventWorkspace_sptr eventws, std::string logname,
   }
 }
 
+/** Generate one integer time series property ("splitter") per target index.
+ * In each series a value of 1 marks a time region whose neutrons belong to
+ * that target and 0 marks a region that target discards.
+ * Assumes m_vecSplitterTime holds the interval boundaries, i.e. it has one
+ * more entry than m_vecSplitterGroup -- TODO confirm against the splitter
+ * parsing code.
+ * @brief FilterEvents::generateSplitterTSP
+ * @param split_tsp_vec :: output vector of newly allocated splitter TSPs,
+ *        one per target index in [0, m_maxTargetIndex]; the caller takes
+ *        ownership of the raw pointers
+ */
+void FilterEvents::generateSplitterTSP(
+    std::vector<Kernel::TimeSeriesProperty<int> *> &split_tsp_vec) {
+  // clear vector to set up
+  split_tsp_vec.clear();
+
+  // initialize m_maxTargetIndex + 1 time series properties in integer
+  for (int itarget = 0; itarget <= m_maxTargetIndex; ++itarget) {
+    Kernel::TimeSeriesProperty<int> *split_tsp =
+        new Kernel::TimeSeriesProperty<int>("splitter");
+    split_tsp_vec.push_back(split_tsp);
+    // seed with 0 at run start so every series covers the whole run
+    split_tsp->addValue(Kernel::DateAndTime(m_runStartTime), 0);
+  }
+
+  // go through m_vecSplitterTime (int64) and m_vecSplitterGroup and add
+  // each interval to the corresponding splitter TSP
+  for (size_t igrp = 0; igrp < m_vecSplitterGroup.size(); ++igrp) {
+    int itarget = m_vecSplitterGroup[igrp];
+    DateAndTime start_time(m_vecSplitterTime[igrp]);
+    if (start_time <= m_runStartTime) {
+      // the interval starts at (or before) run start: remove the seed entry,
+      // which must still be the only one
+      if (split_tsp_vec[itarget]->size() != 1) {
+        g_log.error() << "With start time " << start_time
+                      << " same as run start time " << m_runStartTime
+                      << ", the TSP must have only 1 entry from "
+                         "initialization.  But now it has "
+                      << split_tsp_vec[itarget]->size() << " entries\n";
+        throw std::runtime_error("Coding logic error");
+      }
+      split_tsp_vec[itarget]->clear();
+    }
+    DateAndTime stop_time(m_vecSplitterTime[igrp + 1]);
+    split_tsp_vec[itarget]->addValue(start_time, 1);
+    split_tsp_vec[itarget]->addValue(stop_time, 0);
+  }
+
+  return;
+}
+
+/** Generate the splitter's time series property (log) from the
+ * SplittersWorkspace-type splitters: one integer TSP named "splitter" per
+ * target, with value 1 inside that target's intervals and 0 outside.
+ * @brief FilterEvents::generateSplitterTSPalpha
+ * @param split_tsp_vec :: output vector of newly allocated splitter TSPs,
+ *        one per target index; the caller takes ownership of the raw pointers
+ */
+void FilterEvents::generateSplitterTSPalpha(
+    std::vector<Kernel::TimeSeriesProperty<int> *> &split_tsp_vec) {
+  // clear vector to set up
+  split_tsp_vec.clear();
+
+  // initialize m_maxTargetIndex + 1 time series properties in integer
+  // TODO:FIXME - shall not use m_maxTargetIndex, because it is not set for
+  // SplittersWorkspace-type splitters
+  g_log.debug() << "Maximum target index = " << m_maxTargetIndex << "\n";
+  if (m_maxTargetIndex < 0)
+    throw std::runtime_error("Maximum target index cannot be negative.");
+
+  // initialize the target index
+  for (int itarget = 0; itarget <= m_maxTargetIndex; ++itarget) {
+    Kernel::TimeSeriesProperty<int> *split_tsp =
+        new Kernel::TimeSeriesProperty<int>("splitter");
+    // seed with 0 at run start so the series covers the whole run
+    split_tsp->addValue(m_runStartTime, 0);
+    split_tsp_vec.push_back(split_tsp);
+  }
+
+  for (SplittingInterval splitter : m_splitters) {
+    int itarget = splitter.index();
+    if (itarget >= static_cast<int>(split_tsp_vec.size()))
+      throw std::runtime_error("Target workspace index is out of range!");
+
+    // NOTE(review): only an interval starting exactly at run start clears the
+    // seed entry, whereas generateSplitterTSP uses <= -- confirm intended
+    if (splitter.start() == m_runStartTime) {
+      // there should be only 1 value in the splitter and clear it.
+      if (split_tsp_vec[itarget]->size() != 1) {
+        throw std::runtime_error(
+            "Splitter must have 1 value with initialization.");
+      }
+      split_tsp_vec[itarget]->clear();
+    }
+    split_tsp_vec[itarget]->addValue(splitter.start(), 1);
+    split_tsp_vec[itarget]->addValue(splitter.stop(), 0);
+  }
+
+  return;
+}
+
+/** Add the splitter TimeSeriesProperty ("splitter") logs to the run of each
+ * output workspace, matching the TSP's index in split_tsp_vec to the
+ * workspace's target index.
+ * @brief FilterEvents::mapSplitterTSPtoWorkspaces
+ * @param split_tsp_vec :: splitter TSPs indexed by target; ownership of each
+ *        added property passes to the receiving workspace's Run
+ */
+void FilterEvents::mapSplitterTSPtoWorkspaces(
+    const std::vector<Kernel::TimeSeriesProperty<int> *> &split_tsp_vec) {
+  if (m_useSplittersWorkspace) {
+    g_log.debug() << "There are " << split_tsp_vec.size()
+                  << " TimeSeriesProperties.\n";
+    std::map<int, DataObjects::EventWorkspace_sptr>::iterator miter;
+    for (miter = m_outputWorkspacesMap.begin();
+         miter != m_outputWorkspacesMap.end(); ++miter) {
+      g_log.debug() << "Output workspace index: " << miter->first << "\n";
+      if (0 <= miter->first &&
+          miter->first < static_cast<int>(split_tsp_vec.size())) {
+        DataObjects::EventWorkspace_sptr outws = miter->second;
+        outws->mutableRun().addProperty(split_tsp_vec[miter->first]);
+      } else {
+        // do not silently skip: keep consistent with the warning emitted in
+        // the non-SplittersWorkspace branch below
+        g_log.warning() << "Output workspace index " << miter->first
+                        << " has no matching splitter TimeSeriesProperty.\n";
+      }
+    }
+  } else {
+    // Either Table-type or Matrix-type splitters
+    for (int itarget = 0; itarget < static_cast<int>(split_tsp_vec.size());
+         ++itarget) {
+      // use itarget to find the workspace that is mapped
+      std::map<int, DataObjects::EventWorkspace_sptr>::iterator ws_iter;
+      ws_iter = m_outputWorkspacesMap.find(itarget);
+
+      // skip if an itarget does not have matched workspace
+      if (ws_iter == m_outputWorkspacesMap.end()) {
+        g_log.warning() << "iTarget " << itarget
+                        << " does not have any workspace associated.\n";
+        continue;
+      }
+
+      // get the workspace and add property
+      DataObjects::EventWorkspace_sptr outws = ws_iter->second;
+      outws->mutableRun().addProperty(split_tsp_vec[itarget]);
+    }
+
+  } // END-IF-ELSE (splitter-type)
+
+  return;
+}
+
 /** Get all filterable logs' names (double and integer)
  * @returns Vector of names of logs
  */
diff --git a/Framework/Algorithms/test/FilterEventsTest.h b/Framework/Algorithms/test/FilterEventsTest.h
index d2fd217c53884cc6914523a92d27d1b0dfa06039..9e75dcfcd651d4c00b8439d28e059a6f910f8d02 100644
--- a/Framework/Algorithms/test/FilterEventsTest.h
+++ b/Framework/Algorithms/test/FilterEventsTest.h
@@ -7,6 +7,7 @@
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidAPI/TableRow.h"
 #include "MantidAlgorithms/FilterEvents.h"
+#include "MantidDataObjects/TableWorkspace.h"
 #include "MantidDataObjects/EventList.h"
 #include "MantidDataObjects/EventWorkspace.h"
 #include "MantidDataObjects/Events.h"
@@ -88,7 +89,17 @@ public:
     * In this test
    *  (1) Leave correction table workspace empty
    *  (2) Count events in each output including "-1", the excluded/unselected
-   *events
+   *      events
+   *
+   *  Splitter-log test: each output workspace should have a sample log named
+   *"splitter", which
+   *  is created by FilterEvents to record the splitters for the corresponding
+   *workspace
+   *  1: 20000000000, 20035000000, 0
+   *  2: 20035000000, 20195000000, 1
+   *  3: 20200000000, 20265000000, 2
+   *  4: 20300000000, 20365000000, 2
+   *  5: 20400000000, 20465000000, 2
    */
   void test_FilterNoCorrection() {
     // Create EventWorkspace and SplittersWorkspace
@@ -131,6 +142,18 @@ public:
     TS_ASSERT_EQUALS(filteredws0->getSpectrum(0).getNumberEvents(), 4);
     TS_ASSERT_EQUALS(filteredws0->run().getProtonCharge(), 10);
 
+    // check splitter log
+    TS_ASSERT(filteredws0->run().hasProperty("splitter"));
+    Kernel::TimeSeriesProperty<int> *splitter0 =
+        dynamic_cast<Kernel::TimeSeriesProperty<int> *>(
+            filteredws0->run().getProperty("splitter"));
+    TS_ASSERT(splitter0);
+    TS_ASSERT_EQUALS(splitter0->size(), 2);
+    TS_ASSERT_EQUALS(splitter0->nthTime(0), Kernel::DateAndTime(runstart_i64));
+    TS_ASSERT_EQUALS(splitter0->nthValue(0), 1);
+    TS_ASSERT_EQUALS(splitter0->nthTime(1), Kernel::DateAndTime(20035000000));
+    TS_ASSERT_EQUALS(splitter0->nthValue(1), 0);
+
     // Check Workspace group 1
     EventWorkspace_sptr filteredws1 =
         boost::dynamic_pointer_cast<EventWorkspace>(
@@ -139,6 +162,20 @@ public:
     TS_ASSERT_EQUALS(filteredws1->getSpectrum(1).getNumberEvents(), 16);
     TS_ASSERT_EQUALS(filteredws1->run().getProtonCharge(), 11);
 
+    // check splitter log
+    TS_ASSERT(filteredws0->run().hasProperty("splitter"));
+    Kernel::TimeSeriesProperty<int> *splitter1 =
+        dynamic_cast<Kernel::TimeSeriesProperty<int> *>(
+            filteredws1->run().getProperty("splitter"));
+    TS_ASSERT(splitter1);
+    TS_ASSERT_EQUALS(splitter1->size(), 3);
+    TS_ASSERT_EQUALS(splitter1->nthTime(0), Kernel::DateAndTime(runstart_i64));
+    TS_ASSERT_EQUALS(splitter1->nthValue(0), 0);
+    TS_ASSERT_EQUALS(splitter1->nthTime(1), Kernel::DateAndTime(20035000000));
+    TS_ASSERT_EQUALS(splitter1->nthValue(1), 1);
+    TS_ASSERT_EQUALS(splitter1->nthTime(2), Kernel::DateAndTime(20195000000));
+    TS_ASSERT_EQUALS(splitter1->nthValue(2), 0);
+
     // Check Workspace group 2
     EventWorkspace_sptr filteredws2 =
         boost::dynamic_pointer_cast<EventWorkspace>(
@@ -161,6 +198,32 @@ public:
     TS_ASSERT_DELTA(eventmax.tof(), static_cast<double>(tofdt * 6 / 1000),
                     1.0E-4);
 
+    // check splitter log
+    TS_ASSERT(filteredws2->run().hasProperty("splitter"));
+    Kernel::TimeSeriesProperty<int> *splitter2 =
+        dynamic_cast<Kernel::TimeSeriesProperty<int> *>(
+            filteredws2->run().getProperty("splitter"));
+    TS_ASSERT(splitter2);
+    TS_ASSERT_EQUALS(splitter2->size(), 7);
+
+    TS_ASSERT_EQUALS(splitter2->nthTime(0), Kernel::DateAndTime(runstart_i64));
+    TS_ASSERT_EQUALS(splitter2->nthValue(0), 0);
+
+    TS_ASSERT_EQUALS(splitter2->nthTime(1), Kernel::DateAndTime(20200000000));
+    TS_ASSERT_EQUALS(splitter2->nthValue(1), 1);
+    TS_ASSERT_EQUALS(splitter2->nthTime(2), Kernel::DateAndTime(20265000000));
+    TS_ASSERT_EQUALS(splitter2->nthValue(2), 0);
+
+    TS_ASSERT_EQUALS(splitter2->nthTime(3), Kernel::DateAndTime(20300000000));
+    TS_ASSERT_EQUALS(splitter2->nthValue(3), 1);
+    TS_ASSERT_EQUALS(splitter2->nthTime(4), Kernel::DateAndTime(20365000000));
+    TS_ASSERT_EQUALS(splitter2->nthValue(4), 0);
+
+    TS_ASSERT_EQUALS(splitter2->nthTime(5), Kernel::DateAndTime(20400000000));
+    TS_ASSERT_EQUALS(splitter2->nthValue(5), 1);
+    TS_ASSERT_EQUALS(splitter2->nthTime(6), Kernel::DateAndTime(20465000000));
+    TS_ASSERT_EQUALS(splitter2->nthValue(6), 0);
+
     // Clean up
     AnalysisDataService::Instance().remove("Test02");
     AnalysisDataService::Instance().remove("Splitter02");
@@ -525,6 +588,7 @@ public:
 
     return;
   }
+
   //----------------------------------------------------------------------------------------------
   /**  Filter events without any correction and test for splitters in
    *MatrixWorkspace format
@@ -540,7 +604,14 @@ public:
     * In this test
    *  (1) Leave correction table workspace empty
    *  (2) Count events in each output including "-1", the excluded/unselected
-   *events
+   *      events
+   *
+   * Splitter-log test:
+        979: 0: 0  -  3.5e+07: 0
+        979: 1: 3.5e+07  -  1.95e+08: 1
+        979: 2: 1.95e+08  -  2.65e+08: 2
+        979: 3: 2.65e+08  -  3.65e+08: 2
+        979: 4: 3.65e+08  -  4.65e+08: 2
    */
   void test_FilterRelativeTime() {
     // Create EventWorkspace and SplittersWorkspace
@@ -564,7 +635,7 @@ public:
     filter.setProperty("OutputWorkspaceBaseName", "FilteredWS10");
     filter.setProperty("SplitterWorkspace", "Splitter10");
     filter.setProperty("RelativeTime", true);
-    filter.setProperty("OutputWorkspaceIndexedFrom1", true);
+    filter.setProperty("OutputWorkspaceIndexedFrom1", false);
 
     // Execute
     TS_ASSERT_THROWS_NOTHING(filter.execute());
@@ -574,28 +645,286 @@ public:
     int numsplittedws = filter.getProperty("NumberOutputWS");
     TS_ASSERT_EQUALS(numsplittedws, 3);
 
+    std::vector<std::string> output_ws_vector =
+        filter.getProperty("OutputWorkspaceNames");
+    for (size_t i = 0; i < output_ws_vector.size(); ++i)
+      std::cout << "Output workspace " << i << ": " << output_ws_vector[i]
+                << "\n";
+
     // Workspace 0
     EventWorkspace_sptr filteredws0 =
         boost::dynamic_pointer_cast<EventWorkspace>(
-            AnalysisDataService::Instance().retrieve("FilteredWS10_1"));
+            AnalysisDataService::Instance().retrieve("FilteredWS10_0"));
     TS_ASSERT(filteredws0);
     TS_ASSERT_EQUALS(filteredws0->getNumberHistograms(), 10);
     TS_ASSERT_EQUALS(filteredws0->getSpectrum(0).getNumberEvents(), 3);
 
+    // check sample log "splitter"
+    TS_ASSERT(filteredws0->run().hasProperty("splitter"));
+    Kernel::TimeSeriesProperty<int> *splitter0 =
+        dynamic_cast<Kernel::TimeSeriesProperty<int> *>(
+            filteredws0->run().getProperty("splitter"));
+    TS_ASSERT(splitter0);
+    TS_ASSERT_EQUALS(splitter0->size(), 2);
+    TS_ASSERT_EQUALS(splitter0->nthTime(0), Kernel::DateAndTime(runstart_i64));
+    TS_ASSERT_EQUALS(splitter0->nthValue(0), 1);
+    TS_ASSERT_EQUALS(
+        splitter0->nthTime(1).totalNanoseconds(),
+        Kernel::DateAndTime(static_cast<int>(3.5e+07)).totalNanoseconds() +
+            runstart_i64);
+    TS_ASSERT_EQUALS(splitter0->nthValue(1), 0);
+
     // Workspace 1
     EventWorkspace_sptr filteredws1 =
+        boost::dynamic_pointer_cast<EventWorkspace>(
+            AnalysisDataService::Instance().retrieve("FilteredWS10_1"));
+    TS_ASSERT(filteredws1);
+    TS_ASSERT_EQUALS(filteredws1->getSpectrum(1).getNumberEvents(), 16);
+
+    // check log
+    TS_ASSERT(filteredws1->run().hasProperty("splitter"));
+    Kernel::TimeSeriesProperty<int> *splitter1 =
+        dynamic_cast<Kernel::TimeSeriesProperty<int> *>(
+            filteredws1->run().getProperty("splitter"));
+    TS_ASSERT(splitter1);
+    TS_ASSERT_EQUALS(splitter1->size(), 3);
+
+    TS_ASSERT_EQUALS(splitter1->nthTime(0), Kernel::DateAndTime(runstart_i64));
+    TS_ASSERT_EQUALS(splitter1->nthValue(0), 0);
+
+    TS_ASSERT_EQUALS(
+        splitter1->nthTime(1).totalNanoseconds(),
+        Kernel::DateAndTime(static_cast<int>(3.5e+07)).totalNanoseconds() +
+            runstart_i64);
+    TS_ASSERT_EQUALS(splitter1->nthValue(1), 1);
+    TS_ASSERT_EQUALS(
+        splitter1->nthTime(2).totalNanoseconds(),
+        Kernel::DateAndTime(static_cast<int>(1.95e+08)).totalNanoseconds() +
+            runstart_i64);
+    TS_ASSERT_EQUALS(splitter1->nthValue(2), 0);
+
+    // Workspace 2
+    EventWorkspace_sptr filteredws2 =
         boost::dynamic_pointer_cast<EventWorkspace>(
             AnalysisDataService::Instance().retrieve("FilteredWS10_2"));
+    TS_ASSERT(filteredws2);
+    TS_ASSERT_EQUALS(filteredws2->getSpectrum(1).getNumberEvents(), 27);
+
+    TS_ASSERT(filteredws2->run().hasProperty("splitter"));
+
+    // check splitter log
+    TS_ASSERT(filteredws2->run().hasProperty("splitter"));
+    Kernel::TimeSeriesProperty<int> *splitter2 =
+        dynamic_cast<Kernel::TimeSeriesProperty<int> *>(
+            filteredws2->run().getProperty("splitter"));
+    TS_ASSERT(splitter2);
+    TS_ASSERT_EQUALS(splitter2->size(), 7);
+
+    TS_ASSERT_EQUALS(splitter2->nthTime(0), Kernel::DateAndTime(runstart_i64));
+    TS_ASSERT_EQUALS(splitter2->nthValue(0), 0);
+
+    TS_ASSERT_EQUALS(
+        splitter2->nthTime(1).totalNanoseconds(),
+        Kernel::DateAndTime(static_cast<int>(1.95e+08)).totalNanoseconds() +
+            runstart_i64);
+    TS_ASSERT_EQUALS(splitter2->nthValue(1), 1);
+    TS_ASSERT_EQUALS(
+        splitter2->nthTime(2).totalNanoseconds(),
+        Kernel::DateAndTime(static_cast<int>(2.65e+08)).totalNanoseconds() +
+            runstart_i64);
+    TS_ASSERT_EQUALS(splitter2->nthValue(2), 0);
+
+    // Check spectrum 3 of workspace 2
+    EventList elist3 = filteredws2->getSpectrum(3);
+    elist3.sortPulseTimeTOF();
+
+    TofEvent eventmin = elist3.getEvent(0);
+    TS_ASSERT_EQUALS(eventmin.pulseTime().totalNanoseconds(),
+                     runstart_i64 + pulsedt * 2);
+    TS_ASSERT_DELTA(eventmin.tof(), 0, 1.0E-4);
+
+    TofEvent eventmax = elist3.getEvent(26);
+    TS_ASSERT_EQUALS(eventmax.pulseTime().totalNanoseconds(),
+                     runstart_i64 + pulsedt * 4);
+    TS_ASSERT_DELTA(eventmax.tof(), static_cast<double>(tofdt * 6 / 1000),
+                    1.0E-4);
+
+    //  Test the sample logs
+    std::vector<std::string> outputwsnames =
+        filter.getProperty("OutputWorkspaceNames");
+    for (size_t i = 0; i < outputwsnames.size(); ++i) {
+      EventWorkspace_sptr filtered_ws =
+          boost::dynamic_pointer_cast<DataObjects::EventWorkspace>(
+              AnalysisDataService::Instance().retrieve(outputwsnames[i]));
+
+      TS_ASSERT(filtered_ws->run().hasProperty("LogA"));
+      TS_ASSERT(filtered_ws->run().hasProperty("LogB"));
+      TS_ASSERT(filtered_ws->run().hasProperty("LogC"));
+
+      Kernel::Property *logA = filtered_ws->run().getProperty("LogA");
+      std::string valueA = logA->value();
+      TS_ASSERT_EQUALS(valueA.compare("A"), 0);
+
+      TS_ASSERT(filtered_ws->run().hasProperty("slow_int_log"));
+      Kernel::TimeSeriesProperty<int> *intlog =
+          dynamic_cast<Kernel::TimeSeriesProperty<int> *>(
+              filtered_ws->run().getProperty("slow_int_log"));
+      TS_ASSERT(intlog);
+    }
+
+    // clean up all the workspaces generated
+    AnalysisDataService::Instance().remove("Test10");
+    AnalysisDataService::Instance().remove("Splitter10");
+    for (size_t i = 0; i < outputwsnames.size(); ++i) {
+      AnalysisDataService::Instance().remove(outputwsnames[i]);
+    }
+
+    return;
+  }
+
+  //----------------------------------------------------------------------------------------------
+  /**  Filter events without any correction and test for splitters in
+   *    TableWorkspace filter format
+   *    and the time given for splitters is relative
+   *
+   *  It is exactly the same as unit test: test_FilterRelativeTime()
+   *
+   *  Event workspace:
+   * (1) 10 detectors
+   * (2) Run starts @ 20000000000 seconds
+   * (3) Pulse length = 100*1000*1000 seconds
+   * (4) Within one pulse, two consecutive events/neutrons is apart for
+   * 10*1000*1000 seconds
+   * (5) "Experiment": 5 pulse times.  10 events in each pulse
+   *
+   * In this test
+   *  (1) Leave correction table workspace empty
+   *  (2) Count events in each output including "-1", the excluded/unselected
+   * events
+   */
+  void test_tableSplitter() {
+    // Create EventWorkspace and SplittersWorkspace
+    int64_t runstart_i64 = 20000000000;
+    int64_t pulsedt = 100 * 1000 * 1000;
+    int64_t tofdt = 10 * 1000 * 1000;
+    size_t numpulses = 5;
+
+    EventWorkspace_sptr inpWS =
+        createEventWorkspace(runstart_i64, pulsedt, tofdt, numpulses);
+    AnalysisDataService::Instance().addOrReplace("Test11", inpWS);
+
+    DataObjects::TableWorkspace_sptr splws =
+        createTableSplitters(0, pulsedt, tofdt);
+    AnalysisDataService::Instance().addOrReplace("TableSplitter1", splws);
+
+    FilterEvents filter;
+    filter.initialize();
+
+    // Set properties
+    filter.setProperty("InputWorkspace", "Test11");
+    filter.setProperty("OutputWorkspaceBaseName", "FilteredWS_FromTable");
+    filter.setProperty("SplitterWorkspace", "TableSplitter1");
+    filter.setProperty("RelativeTime", true);
+    filter.setProperty("OutputWorkspaceIndexedFrom1", true);
+
+    // Execute
+    TS_ASSERT_THROWS_NOTHING(filter.execute());
+    TS_ASSERT(filter.isExecuted());
+
+    // Get 3 output workspaces
+    int numsplittedws = filter.getProperty("NumberOutputWS");
+    TS_ASSERT_EQUALS(numsplittedws, 3);
+
+    std::vector<std::string> output_ws_vector =
+        filter.getProperty("OutputWorkspaceNames");
+    for (size_t i = 0; i < output_ws_vector.size(); ++i)
+      std::cout << "Output workspace " << i << ": " << output_ws_vector[i]
+                << "\n";
+
+    // Workspace 0
+    EventWorkspace_sptr filteredws0 =
+        boost::dynamic_pointer_cast<EventWorkspace>(
+            AnalysisDataService::Instance().retrieve("FilteredWS_FromTable_A"));
+    TS_ASSERT(filteredws0);
+    TS_ASSERT_EQUALS(filteredws0->getNumberHistograms(), 10);
+    TS_ASSERT_EQUALS(filteredws0->getSpectrum(0).getNumberEvents(), 3);
+
+    TS_ASSERT(filteredws0->run().hasProperty("splitter"));
+    // check sample log "splitter"
+    TS_ASSERT(filteredws0->run().hasProperty("splitter"));
+    Kernel::TimeSeriesProperty<int> *splitter0 =
+        dynamic_cast<Kernel::TimeSeriesProperty<int> *>(
+            filteredws0->run().getProperty("splitter"));
+    TS_ASSERT(splitter0);
+    TS_ASSERT_EQUALS(splitter0->size(), 2);
+    TS_ASSERT_EQUALS(splitter0->nthTime(0), Kernel::DateAndTime(runstart_i64));
+    TS_ASSERT_EQUALS(splitter0->nthValue(0), 1);
+    TS_ASSERT_EQUALS(
+        splitter0->nthTime(1).totalNanoseconds(),
+        Kernel::DateAndTime(static_cast<int>(3.5e+07)).totalNanoseconds() +
+            runstart_i64);
+    TS_ASSERT_EQUALS(splitter0->nthValue(1), 0);
+
+    // Workspace 1
+    EventWorkspace_sptr filteredws1 =
+        boost::dynamic_pointer_cast<EventWorkspace>(
+            AnalysisDataService::Instance().retrieve("FilteredWS_FromTable_B"));
     TS_ASSERT(filteredws1);
     TS_ASSERT_EQUALS(filteredws1->getSpectrum(1).getNumberEvents(), 16);
 
+    // check log
+    TS_ASSERT(filteredws1->run().hasProperty("splitter"));
+    Kernel::TimeSeriesProperty<int> *splitter1 =
+        dynamic_cast<Kernel::TimeSeriesProperty<int> *>(
+            filteredws1->run().getProperty("splitter"));
+    TS_ASSERT(splitter1);
+    TS_ASSERT_EQUALS(splitter1->size(), 3);
+
+    TS_ASSERT_EQUALS(splitter1->nthTime(0), Kernel::DateAndTime(runstart_i64));
+    TS_ASSERT_EQUALS(splitter1->nthValue(0), 0);
+
+    TS_ASSERT_EQUALS(
+        splitter1->nthTime(1).totalNanoseconds(),
+        Kernel::DateAndTime(static_cast<int>(3.5e+07)).totalNanoseconds() +
+            runstart_i64);
+    TS_ASSERT_EQUALS(splitter1->nthValue(1), 1);
+    TS_ASSERT_EQUALS(
+        splitter1->nthTime(2).totalNanoseconds(),
+        Kernel::DateAndTime(static_cast<int>(1.95e+08)).totalNanoseconds() +
+            runstart_i64);
+    TS_ASSERT_EQUALS(splitter1->nthValue(2), 0);
+
     // Workspace 2
     EventWorkspace_sptr filteredws2 =
         boost::dynamic_pointer_cast<EventWorkspace>(
-            AnalysisDataService::Instance().retrieve("FilteredWS10_3"));
+            AnalysisDataService::Instance().retrieve("FilteredWS_FromTable_C"));
     TS_ASSERT(filteredws2);
     TS_ASSERT_EQUALS(filteredws2->getSpectrum(1).getNumberEvents(), 27);
 
+    // check splitter log
+    TS_ASSERT(filteredws2->run().hasProperty("splitter"));
+    Kernel::TimeSeriesProperty<int> *splitter2 =
+        dynamic_cast<Kernel::TimeSeriesProperty<int> *>(
+            filteredws2->run().getProperty("splitter"));
+    TS_ASSERT(splitter2);
+    TS_ASSERT_EQUALS(splitter2->size(), 7);
+
+    TS_ASSERT_EQUALS(splitter2->nthTime(0), Kernel::DateAndTime(runstart_i64));
+    TS_ASSERT_EQUALS(splitter2->nthValue(0), 0);
+
+    TS_ASSERT_EQUALS(
+        splitter2->nthTime(1).totalNanoseconds(),
+        Kernel::DateAndTime(static_cast<int>(1.95e+08)).totalNanoseconds() +
+            runstart_i64);
+    TS_ASSERT_EQUALS(splitter2->nthValue(1), 1);
+    TS_ASSERT_EQUALS(
+        splitter2->nthTime(2).totalNanoseconds(),
+        Kernel::DateAndTime(static_cast<int>(2.65e+08)).totalNanoseconds() +
+            runstart_i64);
+    TS_ASSERT_EQUALS(splitter2->nthValue(2), 0);
+
+    // TODO - Find out the correct value of the splitter log 2
+
     // Check spectrum 3 of workspace 2
     EventList elist3 = filteredws2->getSpectrum(3);
     elist3.sortPulseTimeTOF();
@@ -611,9 +940,9 @@ public:
     TS_ASSERT_DELTA(eventmax.tof(), static_cast<double>(tofdt * 6 / 1000),
                     1.0E-4);
 
-    // 5. Clean up
-    AnalysisDataService::Instance().remove("Test02");
-    AnalysisDataService::Instance().remove("Splitter02");
+    // Clean up the generated workspaces
+    AnalysisDataService::Instance().remove("Test11");
+    AnalysisDataService::Instance().remove("TableSplitter1");
     std::vector<std::string> outputwsnames =
         filter.getProperty("OutputWorkspaceNames");
     for (size_t i = 0; i < outputwsnames.size(); ++i) {
@@ -622,6 +951,7 @@ public:
 
     return;
   }
+
   //----------------------------------------------------------------------------------------------
   /** Create an EventWorkspace.  This workspace has
     * @param runstart_i64 : absolute run start time in int64_t format with unit
@@ -667,6 +997,27 @@ public:
     eventWS->mutableRun().addLogData(pchargeLog.release());
     eventWS->mutableRun().integrateProtonCharge();
 
+    // add some arbitrary sample log for splitting or not splitting
+    eventWS->mutableRun().addProperty(
+        new Kernel::PropertyWithValue<std::string>("LogA", "A"));
+    eventWS->mutableRun().addProperty(
+        new Kernel::PropertyWithValue<std::string>("LogB", "B"));
+    eventWS->mutableRun().addProperty(
+        new Kernel::PropertyWithValue<std::string>("LogC", "C"), true);
+    eventWS->mutableRun().addProperty(
+        new Kernel::PropertyWithValue<std::string>("Title",
+                                                   "Testing EventWorkspace"));
+
+    // add an integer slow log
+    auto int_tsp =
+        Kernel::make_unique<Kernel::TimeSeriesProperty<int>>("slow_int_log");
+    for (size_t i = 0; i < 10; ++i) {
+      Kernel::DateAndTime log_time(runstart_i64 + 5 * pulsedt * i);
+      int log_value = static_cast<int>(i + 1) * 20;
+      int_tsp->addValue(log_time, log_value);
+    }
+    eventWS->mutableRun().addLogData(int_tsp.release());
+
     return eventWS;
   }
 
@@ -830,18 +1181,24 @@ public:
     Kernel::SplittingInterval interval0(t0, t1, 0);
     splitterws->addSplitter(interval0);
 
+    std::cout << "Add splitters: " << t0 << ", " << t1 << ", " << 0 << "\n";
+
     // 2. Splitter 1: 3+ ~ 9+ (second pulse)
     t0 = t1;
     t1 = runstart_i64 + pulsedt + tofdt * 9 + tofdt / 2;
     Kernel::SplittingInterval interval1(t0, t1, 1);
     splitterws->addSplitter(interval1);
 
+    std::cout << "Add splitters: " << t0 << ", " << t1 << ", " << 1 << "\n";
+
     // 3. Splitter 2: from 3rd pulse, 0 ~ 6+
     for (size_t i = 2; i < 5; i++) {
       t0 = runstart_i64 + i * pulsedt;
       t1 = runstart_i64 + i * pulsedt + 6 * tofdt + tofdt / 2;
       Kernel::SplittingInterval interval2(t0, t1, 2);
       splitterws->addSplitter(interval2);
+      // std::cout << "Add splitters: " << t0 << ", " << t1 << ", " << 2 <<
+      // "\n";
     }
 
     return splitterws;
@@ -902,9 +1259,78 @@ public:
     for (size_t iy = 0; iy < size_y; ++iy)
       splitterws->mutableY(0)[iy] = static_cast<double>(index_vec[iy]);
 
+    // print out splitters
+    for (size_t ix = 0; ix < size_y; ++ix)
+      std::cout << ix << ": " << splitterws->mutableX(0)[ix] * 1.0E-9 << "  -  "
+                << splitterws->mutableX(0)[ix + 1] * 1.0E-9 << ": "
+                << splitterws->mutableY(0)[ix] << "\n";
+
     return splitterws;
   }
 
+  /** Create splitters in a TableWorkspace that are exactly the same as the
+   * matrix splitters
+   *  Region:
+   * 0: pulse 0: 0 ~ 3+
+   * 1: pulse 0: 3+ ~ pulse 1: 9+
+   * 2: from pulse 2: 0 ~ 6+
+   * -1: from pulse 2: 6+ ~ 9+
+   * @brief createTableSplitters
+   * @param runstart_i64 : absolute run start time in int64_t format with unit
+   * nanosecond
+   * @param pulsedt: pulse length in int64_t format with unit nanosecond
+   * @param tofdt: time interval between 2 adjacent event in same pulse in
+   * int64_t format of unit nanosecond
+   * @return
+   */
+  DataObjects::TableWorkspace_sptr
+  createTableSplitters(int64_t runstart_i64, int64_t pulsedt, int64_t tofdt) {
+    // create table workspace
+    DataObjects::TableWorkspace_sptr tablesplitter =
+        boost::make_shared<DataObjects::TableWorkspace>();
+    tablesplitter->addColumn("double", "start");
+    tablesplitter->addColumn("double", "stop");
+    tablesplitter->addColumn("str", "target");
+
+    // generate row by row
+    // Splitter 0: 0 ~ 3+ (first pulse)
+    size_t row_index = 0;
+    int64_t t1 = runstart_i64 + tofdt * 3 + tofdt / 2;
+    std::string itarget = "A";
+    tablesplitter->appendRow();
+    tablesplitter->cell<double>(row_index, 0) =
+        static_cast<double>(runstart_i64) * 1.0E-9;
+    tablesplitter->cell<double>(row_index, 1) = static_cast<double>(t1) * 1.E-9;
+    tablesplitter->cell<std::string>(row_index, 2) = itarget;
+
+    // Splitter 1: 3+ ~ 9+ (second pulse)
+    ++row_index;
+    int64_t t2 = runstart_i64 + pulsedt + tofdt * 9 + tofdt / 2;
+    itarget = "B";
+    tablesplitter->appendRow();
+    tablesplitter->cell<double>(row_index, 0) =
+        static_cast<double>(t1) * 1.0E-9;
+    tablesplitter->cell<double>(row_index, 1) = static_cast<double>(t2) * 1.E-9;
+    tablesplitter->cell<std::string>(row_index, 2) = itarget;
+
+    // Splitter 2 and so on: from 3rd pulse, 0 ~ 6+
+    int64_t lastT = t2;
+    for (size_t i = 2; i < 5; i++) {
+      ++row_index;
+      itarget = "C";
+      int64_t newT = runstart_i64 + i * pulsedt + 6 * tofdt + tofdt / 2;
+      tablesplitter->appendRow();
+      tablesplitter->cell<double>(row_index, 0) =
+          static_cast<double>(lastT) * 1.0E-9;
+      tablesplitter->cell<double>(row_index, 1) =
+          static_cast<double>(newT) * 1.E-9;
+      tablesplitter->cell<std::string>(row_index, 2) = itarget;
+      lastT = newT;
+    }
+
+    return tablesplitter;
+  }
+
   //----------------------------------------------------------------------------------------------
   /** Create a Splitter for fast fequency log for output
     * The splitter is within every pulse.  2 groups of splitters are created.
diff --git a/Framework/Beamline/inc/MantidBeamline/DetectorInfo.h b/Framework/Beamline/inc/MantidBeamline/DetectorInfo.h
index 34abb06a7a4cec78840dc179e3a2c547e2ff9fc7..b99b148ec1199d34645faf90a65b467e57d6b319 100644
--- a/Framework/Beamline/inc/MantidBeamline/DetectorInfo.h
+++ b/Framework/Beamline/inc/MantidBeamline/DetectorInfo.h
@@ -68,41 +68,88 @@ public:
   bool isEquivalent(const DetectorInfo &other) const;
 
   size_t size() const;
+  bool isScanning() const;
 
   bool isMonitor(const size_t index) const;
+  bool isMonitor(const std::pair<size_t, size_t> &index) const;
   bool isMasked(const size_t index) const;
+  bool isMasked(const std::pair<size_t, size_t> &index) const;
   void setMasked(const size_t index, bool masked);
+  void setMasked(const std::pair<size_t, size_t> &index, bool masked);
   Eigen::Vector3d position(const size_t index) const;
+  Eigen::Vector3d position(const std::pair<size_t, size_t> &index) const;
   Eigen::Quaterniond rotation(const size_t index) const;
+  Eigen::Quaterniond rotation(const std::pair<size_t, size_t> &index) const;
   void setPosition(const size_t index, const Eigen::Vector3d &position);
+  void setPosition(const std::pair<size_t, size_t> &index,
+                   const Eigen::Vector3d &position);
   void setRotation(const size_t index, const Eigen::Quaterniond &rotation);
+  void setRotation(const std::pair<size_t, size_t> &index,
+                   const Eigen::Quaterniond &rotation);
+
+  size_t scanCount(const size_t index) const;
+  std::pair<int64_t, int64_t>
+  scanInterval(const std::pair<size_t, size_t> &index) const;
+  void setScanInterval(const size_t index,
+                       const std::pair<int64_t, int64_t> &interval);
+
+  void merge(const DetectorInfo &other);
 
 private:
+  size_t linearIndex(const std::pair<size_t, size_t> &index) const;
+  void checkNoTimeDependence() const;
+  void initScanCounts();
+  void initScanIntervals();
+  void initIndices();
+  std::vector<bool> buildMergeIndices(const DetectorInfo &other) const;
+
   Kernel::cow_ptr<std::vector<bool>> m_isMonitor{nullptr};
   Kernel::cow_ptr<std::vector<bool>> m_isMasked{nullptr};
   Kernel::cow_ptr<std::vector<Eigen::Vector3d>> m_positions{nullptr};
   Kernel::cow_ptr<std::vector<Eigen::Quaterniond>> m_rotations{nullptr};
+
+  Kernel::cow_ptr<std::vector<size_t>> m_scanCounts{nullptr};
+  Kernel::cow_ptr<std::vector<std::pair<int64_t, int64_t>>> m_scanIntervals{
+      nullptr};
+  Kernel::cow_ptr<std::vector<std::vector<size_t>>> m_indexMap{nullptr};
+  Kernel::cow_ptr<std::vector<std::pair<size_t, size_t>>> m_indices{nullptr};
 };
 
-/// Returns the position of the detector with given index.
+/** Returns the position of the detector with given detector index.
+ *
+ * Convenience method for beamlines with static (non-moving) detectors.
+ * Throws if there are time-dependent detectors. */
 inline Eigen::Vector3d DetectorInfo::position(const size_t index) const {
+  checkNoTimeDependence();
   return (*m_positions)[index];
 }
 
-/// Returns the rotation of the detector with given index.
+/** Returns the rotation of the detector with given detector index.
+ *
+ * Convenience method for beamlines with static (non-moving) detectors.
+ * Throws if there are time-dependent detectors. */
 inline Eigen::Quaterniond DetectorInfo::rotation(const size_t index) const {
+  checkNoTimeDependence();
   return (*m_rotations)[index];
 }
 
-/// Set the position of the detector with given index.
+/** Set the position of the detector with given detector index.
+ *
+ * Convenience method for beamlines with static (non-moving) detectors.
+ * Throws if there are time-dependent detectors. */
 inline void DetectorInfo::setPosition(const size_t index,
                                       const Eigen::Vector3d &position) {
+  checkNoTimeDependence();
   m_positions.access()[index] = position;
 }
 
-/// Set the rotation of the detector with given index.
+/** Set the rotation of the detector with given detector index.
+ *
+ * Convenience method for beamlines with static (non-moving) detectors.
+ * Throws if there are time-dependent detectors. */
 inline void DetectorInfo::setRotation(const size_t index,
                                       const Eigen::Quaterniond &rotation) {
+  checkNoTimeDependence();
   m_rotations.access()[index] = rotation.normalized();
 }
 
diff --git a/Framework/Beamline/src/DetectorInfo.cpp b/Framework/Beamline/src/DetectorInfo.cpp
index 321244c1346b44b3e0efe1759e2cbead91d0f386..80a55bfed43160283379d6b311f3a534d1edb5f8 100644
--- a/Framework/Beamline/src/DetectorInfo.cpp
+++ b/Framework/Beamline/src/DetectorInfo.cpp
@@ -34,11 +34,12 @@ DetectorInfo::DetectorInfo(std::vector<Eigen::Vector3d> positions,
  * - Positions that differ by less than 1 nm = 1e-9 m are considered equivalent.
  * - Rotations that imply relative position changes of less than 1 nm = 1e-9 m
  *   with a rotation center that is 1000 m away are considered equivalent.
- * Note that in both cases the actual limit may be lower, but it is guarenteed
+ * Note that in both cases the actual limit may be lower, but it is guaranteed
  * that any LARGER differences are NOT considered equivalent. */
 bool DetectorInfo::isEquivalent(const DetectorInfo &other) const {
   if (this == &other)
     return true;
+  // Same number of detectors
   if (size() != other.size())
     return false;
   if (size() == 0)
@@ -48,6 +49,17 @@ bool DetectorInfo::isEquivalent(const DetectorInfo &other) const {
     return false;
   if (!(m_isMasked == other.m_isMasked) && (*m_isMasked != *other.m_isMasked))
     return false;
+
+  // Scanning related fields. Not testing m_scanCounts and m_indexMap since
+  // those are just derived internally from m_indices.
+  if (m_scanIntervals && other.m_scanIntervals &&
+      !(m_scanIntervals == other.m_scanIntervals) &&
+      (*m_scanIntervals != *other.m_scanIntervals))
+    return false;
+  if (m_indices && other.m_indices && !(m_indices == other.m_indices) &&
+      (*m_indices != *other.m_indices))
+    return false;
+
   // Positions: Absolute difference matter, so comparison is not relative.
   // Changes below 1 nm = 1e-9 m are allowed.
   if (!(m_positions == other.m_positions) &&
@@ -78,28 +90,256 @@ bool DetectorInfo::isEquivalent(const DetectorInfo &other) const {
   return true;
 }
 
-/// Returns the size of the DetectorInfo, i.e., the number of detectors in the
-/// instrument.
+/** Returns the number of detectors in the instrument.
+ *
+ * If a detector is moving, i.e., has more than one associated position, it is
+ * nevertheless only counted as a single detector. */
 size_t DetectorInfo::size() const {
-  if (!m_isMasked)
+  if (!m_isMonitor)
     return 0;
-  return m_isMasked->size();
+  return m_isMonitor->size();
 }
 
-/// Returns true if the detector is a monitor.
+/// Returns true if the beamline has scanning detectors.
+bool DetectorInfo::isScanning() const { return size() != m_positions->size(); }
+
+/// Returns true if the detector with given detector index is a monitor.
 bool DetectorInfo::isMonitor(const size_t index) const {
+  // No check for time dependence since monitor flags are not time dependent.
   return (*m_isMonitor)[index];
 }
 
-/// Returns true if the detector is masked.
+/** Returns true if the detector with given index is a monitor.
+ *
+ * The time component of the index is ignored since a detector is a monitor
+ * either for *all* times or for *none*. */
+bool DetectorInfo::isMonitor(const std::pair<size_t, size_t> &index) const {
+  // Monitors are not time dependent, ignore time component of index.
+  return (*m_isMonitor)[index.first];
+}
+
+/** Returns true if the detector with given detector index is masked.
+ *
+ * Convenience method for beamlines with static (non-moving) detectors.
+ * Throws if there are time-dependent detectors. */
 bool DetectorInfo::isMasked(const size_t index) const {
+  checkNoTimeDependence();
   return (*m_isMasked)[index];
 }
 
-/// Set the mask flag of the detector with given index. Not thread safe.
+/// Returns true if the detector with given index is masked.
+bool DetectorInfo::isMasked(const std::pair<size_t, size_t> &index) const {
+  return (*m_isMasked)[linearIndex(index)];
+}
+
+/** Set the mask flag of the detector with given detector index. Not thread
+ * safe.
+ *
+ * Convenience method for beamlines with static (non-moving) detectors.
+ * Throws if there are time-dependent detectors. */
 void DetectorInfo::setMasked(const size_t index, bool masked) {
+  checkNoTimeDependence();
   m_isMasked.access()[index] = masked;
 }
 
+/// Set the mask flag of the detector with given index. Not thread safe.
+void DetectorInfo::setMasked(const std::pair<size_t, size_t> &index,
+                             bool masked) {
+  m_isMasked.access()[linearIndex(index)] = masked;
+}
+
+/// Returns the position of the detector with given index.
+Eigen::Vector3d
+DetectorInfo::position(const std::pair<size_t, size_t> &index) const {
+  return (*m_positions)[linearIndex(index)];
+}
+
+/// Returns the rotation of the detector with given index.
+Eigen::Quaterniond
+DetectorInfo::rotation(const std::pair<size_t, size_t> &index) const {
+  return (*m_rotations)[linearIndex(index)];
+}
+
+/// Set the position of the detector with given index.
+void DetectorInfo::setPosition(const std::pair<size_t, size_t> &index,
+                               const Eigen::Vector3d &position) {
+  m_positions.access()[linearIndex(index)] = position;
+}
+
+/// Set the rotation of the detector with given index.
+void DetectorInfo::setRotation(const std::pair<size_t, size_t> &index,
+                               const Eigen::Quaterniond &rotation) {
+  m_rotations.access()[linearIndex(index)] = rotation.normalized();
+}
+
+/// Returns the scan count of the detector with given detector index.
+size_t DetectorInfo::scanCount(const size_t index) const {
+  if (!m_scanCounts)
+    return 1;
+  return (*m_scanCounts)[index];
+}
+
+/** Returns the scan interval of the detector with given index.
+ *
+ * The interval start and end values would typically correspond to nanoseconds
+ * since 1990, as in Kernel::DateAndTime. */
+std::pair<int64_t, int64_t>
+DetectorInfo::scanInterval(const std::pair<size_t, size_t> &index) const {
+  if (!m_scanIntervals)
+    return {0, 0};
+  return (*m_scanIntervals)[linearIndex(index)];
+}
+
+/** Set the scan interval of the detector with given detector index.
+ * Throws std::runtime_error, without modifying any state, if start >= end.
+ * The interval start and end values would typically correspond to nanoseconds
+ * since 1990, as in Kernel::DateAndTime. Note that it is currently not possible
+ * to modify scan intervals for a DetectorInfo with time-dependent detectors,
+ * i.e., time intervals must be set with this method before merging individual
+ * scans. */
+void DetectorInfo::setScanInterval(
+    const size_t index, const std::pair<int64_t, int64_t> &interval) {
+  checkNoTimeDependence();
+  if (interval.first >= interval.second)
+    throw std::runtime_error(
+        "DetectorInfo: cannot set scan interval with start >= end");
+  if (!m_scanIntervals)
+    initScanIntervals();
+  m_scanIntervals.access()[index] = interval;
+}
+
+namespace {
+void failMerge(const std::string &what) {
+  throw std::runtime_error(std::string("Cannot merge DetectorInfo: ") + what);
+}
+
+std::pair<size_t, size_t>
+getIndex(const Kernel::cow_ptr<std::vector<std::pair<size_t, size_t>>> &indices,
+         const size_t index) {
+  if (!indices)
+    return {index, 0};
+  return (*indices)[index];
+}
+}
+
+/** Merges the contents of other into this.
+ *
+ * Scan intervals in both other and this must be set. Intervals must be
+ * identical or non-overlapping. If they are identical all other parameters (for
+ * that index) must match.
+ *
+ * Time indices in `this` are preserved. Time indices added from `other` are
+ * incremented by the scan count of that detector in `this`. The relative order
+ * of time indices added from `other` is preserved. If the interval for a time
+ * index in `other` is identical to a corresponding interval in `this`, it is
+ * ignored, i.e., no time index is added. */
+void DetectorInfo::merge(const DetectorInfo &other) {
+  const auto &merge = buildMergeIndices(other);
+  if (!m_scanCounts)
+    initScanCounts();
+  if (!m_indexMap)
+    initIndices();
+  // Temporary to accumulate scan counts (need original for index offset).
+  auto scanCounts(m_scanCounts);
+  for (size_t linearIndex = 0; linearIndex < other.m_positions->size();
+       ++linearIndex) {
+    if (!merge[linearIndex])
+      continue;
+    auto newIndex = getIndex(other.m_indices, linearIndex);
+    const size_t detIndex = newIndex.first;
+    newIndex.second += scanCount(detIndex);
+    scanCounts.access()[detIndex]++;
+    m_indexMap.access()[detIndex].push_back((*m_indices).size());
+    m_indices.access().push_back(newIndex);
+    m_isMasked.access().push_back((*other.m_isMasked)[linearIndex]);
+    m_positions.access().push_back((*other.m_positions)[linearIndex]);
+    m_rotations.access().push_back((*other.m_rotations)[linearIndex]);
+    m_scanIntervals.access().push_back((*other.m_scanIntervals)[linearIndex]);
+  }
+  m_scanCounts = std::move(scanCounts);
+}
+
+/// Returns the linear index for a pair of detector index and time index.
+size_t DetectorInfo::linearIndex(const std::pair<size_t, size_t> &index) const {
+  // The most common case are beamlines with static detectors. In that case the
+  // time index is always 0 and we avoid expensive map lookups. Linear indices
+  // are ordered such that the first block contains everything for time index 0
+  // so even in the time dependent case no translation is necessary.
+  if (index.second == 0)
+    return index.first;
+  return (*m_indexMap)[index.first][index.second];
+}
+
+/// Throws if this has time-dependent data.
+void DetectorInfo::checkNoTimeDependence() const {
+  if (isScanning())
+    throw std::runtime_error("DetectorInfo accessed without time index but the "
+                             "beamline has time-dependent (moving) detectors.");
+}
+
+void DetectorInfo::initScanCounts() {
+  checkNoTimeDependence();
+  m_scanCounts = Kernel::make_cow<std::vector<size_t>>(size(), 1);
+}
+
+void DetectorInfo::initScanIntervals() {
+  checkNoTimeDependence();
+  m_scanIntervals = Kernel::make_cow<std::vector<std::pair<int64_t, int64_t>>>(
+      size(), std::pair<int64_t, int64_t>{0, 1});
+}
+
+void DetectorInfo::initIndices() {
+  checkNoTimeDependence();
+  m_indexMap = Kernel::make_cow<std::vector<std::vector<size_t>>>();
+  m_indices = Kernel::make_cow<std::vector<std::pair<size_t, size_t>>>();
+  auto &indexMap = m_indexMap.access();
+  auto &indices = m_indices.access();
+  indexMap.reserve(size());
+  indices.reserve(size());
+  // No time dependence, so both the detector index and the linear index are i.
+  for (size_t i = 0; i < size(); ++i) {
+    indexMap.emplace_back(1, i);
+    indices.emplace_back(i, 0);
+  }
+}
+
+std::vector<bool>
+DetectorInfo::buildMergeIndices(const DetectorInfo &other) const {
+  if (size() != other.size())
+    failMerge("size mismatch");
+  if (!m_scanIntervals || !other.m_scanIntervals)
+    failMerge("scan intervals not defined");
+  if (!(m_isMonitor == other.m_isMonitor) &&
+      (*m_isMonitor != *other.m_isMonitor))
+    failMerge("monitor flags mismatch");
+  // TODO If we make masking time-independent we need to check masking here.
+
+  std::vector<bool> merge(other.m_positions->size(), true);
+
+  for (size_t linearIndex1 = 0; linearIndex1 < other.m_positions->size();
+       ++linearIndex1) {
+    const size_t detIndex = getIndex(other.m_indices, linearIndex1).first;
+    const auto &interval1 = (*other.m_scanIntervals)[linearIndex1];
+    for (size_t timeIndex = 0; timeIndex < scanCount(detIndex); ++timeIndex) {
+      const auto linearIndex2 = linearIndex({detIndex, timeIndex});
+      const auto &interval2 = (*m_scanIntervals)[linearIndex2];
+      if (interval1 == interval2) {
+        if ((*m_isMasked)[linearIndex2] != (*other.m_isMasked)[linearIndex1])
+          failMerge("matching scan interval but mask flags differ");
+        if ((*m_positions)[linearIndex2] != (*other.m_positions)[linearIndex1])
+          failMerge("matching scan interval but positions differ");
+        if ((*m_rotations)[linearIndex2].coeffs() !=
+            (*other.m_rotations)[linearIndex1].coeffs())
+          failMerge("matching scan interval but rotations differ");
+        merge[linearIndex1] = false;
+      } else if ((interval1.first < interval2.second) &&
+                 (interval1.second > interval2.first)) {
+        failMerge("scan intervals overlap but not identical");
+      }
+    }
+  }
+  return merge;
+}
+
 } // namespace Beamline
 } // namespace Mantid
diff --git a/Framework/Beamline/test/DetectorInfoTest.h b/Framework/Beamline/test/DetectorInfoTest.h
index b7afd1232d79af10022b85e69ca4e7968eee0da3..418746bc99cd44a411aa804c919566e2e5f5e26b 100644
--- a/Framework/Beamline/test/DetectorInfoTest.h
+++ b/Framework/Beamline/test/DetectorInfoTest.h
@@ -25,6 +25,7 @@ public:
     TS_ASSERT_THROWS_NOTHING(
         detInfo = Kernel::make_unique<DetectorInfo>(PosVec(1), RotVec(1)));
     TS_ASSERT_EQUALS(detInfo->size(), 1);
+    TS_ASSERT(!detInfo->isScanning());
   }
 
   void test_constructor_with_monitors() {
@@ -253,6 +254,272 @@ public:
     info.setRotation(0, rot);
     TS_ASSERT_EQUALS(info.rotation(0).coeffs(), rot.normalized().coeffs());
   }
+
+  void test_scanCount() {
+    DetectorInfo info(PosVec(1), RotVec(1));
+    TS_ASSERT_EQUALS(info.scanCount(0), 1);
+  }
+
+  void test_scanInterval() {
+    DetectorInfo info(PosVec(1), RotVec(1));
+    TS_ASSERT_EQUALS(info.scanInterval({0, 0}),
+                     (std::pair<int64_t, int64_t>(0, 0)));
+  }
+
+  void test_setScanInterval() {
+    DetectorInfo info(PosVec(1), RotVec(1));
+    info.setScanInterval(0, {1, 2});
+    TS_ASSERT_EQUALS(info.scanInterval({0, 0}),
+                     (std::pair<int64_t, int64_t>(1, 2)));
+  }
+
+  void test_setScanInterval_failures() {
+    DetectorInfo info(PosVec(1), RotVec(1));
+    TS_ASSERT_THROWS_EQUALS(
+        info.setScanInterval(0, {1, 1}), const std::runtime_error &e,
+        std::string(e.what()),
+        "DetectorInfo: cannot set scan interval with start >= end");
+    TS_ASSERT_THROWS_EQUALS(
+        info.setScanInterval(0, {2, 1}), const std::runtime_error &e,
+        std::string(e.what()),
+        "DetectorInfo: cannot set scan interval with start >= end");
+  }
+
+  void test_merge_fail_size() {
+    DetectorInfo a(PosVec(1), RotVec(1));
+    DetectorInfo b(PosVec(2), RotVec(2));
+    a.setScanInterval(0, {0, 1});
+    b.setScanInterval(0, {0, 1});
+    b.setScanInterval(1, {0, 1});
+    TS_ASSERT_THROWS_EQUALS(a.merge(b), const std::runtime_error &e,
+                            std::string(e.what()),
+                            "Cannot merge DetectorInfo: size mismatch");
+  }
+
+  void test_merge_fail_no_intervals() {
+    DetectorInfo a(PosVec(1), RotVec(1));
+    DetectorInfo b(PosVec(1), RotVec(1));
+    DetectorInfo c(PosVec(1), RotVec(1));
+    TS_ASSERT_THROWS_EQUALS(
+        a.merge(b), const std::runtime_error &e, std::string(e.what()),
+        "Cannot merge DetectorInfo: scan intervals not defined");
+    c.setScanInterval(0, {0, 1});
+    TS_ASSERT_THROWS_EQUALS(
+        a.merge(c), const std::runtime_error &e, std::string(e.what()),
+        "Cannot merge DetectorInfo: scan intervals not defined");
+    a.setScanInterval(0, {0, 1});
+    TS_ASSERT_THROWS_EQUALS(
+        a.merge(b), const std::runtime_error &e, std::string(e.what()),
+        "Cannot merge DetectorInfo: scan intervals not defined");
+  }
+
+  void test_merge_fail_monitor_mismatch() {
+    DetectorInfo a(PosVec(2), RotVec(2));
+    DetectorInfo b(PosVec(2), RotVec(2), {1});
+    a.setScanInterval(0, {0, 1});
+    a.setScanInterval(1, {0, 1});
+    b.setScanInterval(0, {0, 1});
+    b.setScanInterval(1, {0, 1});
+    TS_ASSERT_THROWS_EQUALS(
+        a.merge(b), const std::runtime_error &e, std::string(e.what()),
+        "Cannot merge DetectorInfo: monitor flags mismatch");
+  }
+
+  void test_merge_identical_interval_failures() {
+    DetectorInfo a(PosVec(1), RotVec(1));
+    a.setScanInterval(0, {0, 1});
+    Eigen::Vector3d pos1(1, 0, 0);
+    Eigen::Vector3d pos2(2, 0, 0);
+    Eigen::Quaterniond rot1(
+        Eigen::AngleAxisd(30.0, Eigen::Vector3d{1, 2, 3}.normalized()));
+    Eigen::Quaterniond rot2(
+        Eigen::AngleAxisd(31.0, Eigen::Vector3d{1, 2, 3}.normalized()));
+    a.setMasked(0, true);
+    a.setPosition(0, pos1);
+    a.setRotation(0, rot1);
+    auto b(a);
+    TS_ASSERT_THROWS_NOTHING(b.merge(a));
+
+    b = a;
+    b.setMasked(0, false);
+    TS_ASSERT_THROWS_EQUALS(b.merge(a), const std::runtime_error &e,
+                            std::string(e.what()), "Cannot merge DetectorInfo: "
+                                                   "matching scan interval but "
+                                                   "mask flags differ");
+    b.setMasked(0, true);
+    TS_ASSERT_THROWS_NOTHING(b.merge(a));
+
+    b = a;
+    b.setPosition(0, pos2);
+    TS_ASSERT_THROWS_EQUALS(b.merge(a), const std::runtime_error &e,
+                            std::string(e.what()), "Cannot merge DetectorInfo: "
+                                                   "matching scan interval but "
+                                                   "positions differ");
+    b.setPosition(0, pos1);
+    TS_ASSERT_THROWS_NOTHING(b.merge(a));
+
+    b = a;
+    b.setRotation(0, rot2);
+    TS_ASSERT_THROWS_EQUALS(b.merge(a), const std::runtime_error &e,
+                            std::string(e.what()), "Cannot merge DetectorInfo: "
+                                                   "matching scan interval but "
+                                                   "rotations differ");
+    b.setRotation(0, rot1);
+    TS_ASSERT_THROWS_NOTHING(b.merge(a));
+  }
+
+  void test_merge_identical_interval() {
+    DetectorInfo a(PosVec(1), RotVec(1));
+    a.setScanInterval(0, {0, 1});
+    const auto b(a);
+    TS_ASSERT_THROWS_NOTHING(a.merge(b));
+    TS_ASSERT(a.isEquivalent(b));
+  }
+
+  void test_merge_identical_interval_with_monitor() {
+    DetectorInfo a(PosVec(2), RotVec(2), {1});
+    a.setScanInterval(0, {0, 1});
+    a.setScanInterval(1, {0, 1});
+    const auto b(a);
+    TS_ASSERT_THROWS_NOTHING(a.merge(b));
+    TS_ASSERT(a.isEquivalent(b));
+  }
+
+  void test_merge_fail_partial_overlap() {
+    DetectorInfo a(PosVec(2), RotVec(2));
+    a.setScanInterval(0, {0, 10});
+    a.setScanInterval(1, {0, 10});
+    auto b(a);
+    TS_ASSERT_THROWS_NOTHING(b.merge(a));
+    b = a;
+    b.setScanInterval(1, {-1, 5});
+    TS_ASSERT_THROWS_EQUALS(
+        b.merge(a), const std::runtime_error &e, std::string(e.what()),
+        "Cannot merge DetectorInfo: scan intervals overlap but not identical");
+    b.setScanInterval(1, {1, 5});
+    TS_ASSERT_THROWS_EQUALS(
+        b.merge(a), const std::runtime_error &e, std::string(e.what()),
+        "Cannot merge DetectorInfo: scan intervals overlap but not identical");
+    b.setScanInterval(1, {1, 11});
+    TS_ASSERT_THROWS_EQUALS(
+        b.merge(a), const std::runtime_error &e, std::string(e.what()),
+        "Cannot merge DetectorInfo: scan intervals overlap but not identical");
+  }
+
+  void test_merge() {
+    DetectorInfo a(PosVec(2), RotVec(2), {1});
+    // Monitor at index 1, set up for identical interval
+    std::pair<int64_t, int64_t> monitorInterval(0, 2);
+    a.setScanInterval(1, monitorInterval);
+    auto b(a);
+    Eigen::Vector3d pos1(1, 0, 0);
+    Eigen::Vector3d pos2(2, 0, 0);
+    a.setPosition(0, pos1);
+    b.setPosition(0, pos2);
+    std::pair<int64_t, int64_t> interval1(0, 1);
+    std::pair<int64_t, int64_t> interval2(1, 2);
+    a.setScanInterval(0, interval1);
+    b.setScanInterval(0, interval2);
+    TS_ASSERT_THROWS_NOTHING(a.merge(b));
+    TS_ASSERT(a.isScanning());
+    TS_ASSERT(!a.isEquivalent(b));
+    TS_ASSERT_EQUALS(a.size(), 2);
+    TS_ASSERT_EQUALS(a.scanCount(0), 2);
+    // Note that the order is not guaranteed; currently these are just in the
+    // order in which they are merged.
+    TS_ASSERT_EQUALS(a.scanInterval({0, 0}), interval1);
+    TS_ASSERT_EQUALS(a.scanInterval({0, 1}), interval2);
+    TS_ASSERT_EQUALS(a.position({0, 0}), pos1);
+    TS_ASSERT_EQUALS(a.position({0, 1}), pos2);
+    // Monitor is not scanning
+    TS_ASSERT_EQUALS(a.scanCount(1), 1);
+  }
+
+  void test_merge_idempotent() {
+    // Test that A + B + B = A + B
+    DetectorInfo a(PosVec(2), RotVec(2), {1});
+    // Monitor at index 1, set up for identical interval
+    std::pair<int64_t, int64_t> monitorInterval(0, 2);
+    a.setScanInterval(1, monitorInterval);
+    a.setPosition(1, {0, 0, 0});
+    auto b(a);
+    Eigen::Vector3d pos1(1, 0, 0);
+    Eigen::Vector3d pos2(2, 0, 0);
+    a.setPosition(0, pos1);
+    b.setPosition(0, pos2);
+    std::pair<int64_t, int64_t> interval1(0, 1);
+    std::pair<int64_t, int64_t> interval2(1, 2);
+    a.setScanInterval(0, interval1);
+    b.setScanInterval(0, interval2);
+
+    TS_ASSERT_THROWS_NOTHING(a.merge(b));
+    auto a0(a);
+    TS_ASSERT_THROWS_NOTHING(a.merge(b));
+    TS_ASSERT(a.isEquivalent(a0));
+  }
+
+  void test_merge_multiple() {
+    DetectorInfo a(PosVec(2), RotVec(2), {1});
+    // Monitor at index 1, set up for identical interval
+    std::pair<int64_t, int64_t> monitorInterval(0, 3);
+    a.setScanInterval(1, monitorInterval);
+    auto b(a);
+    auto c(a);
+    Eigen::Vector3d pos1(1, 0, 0);
+    Eigen::Vector3d pos2(2, 0, 0);
+    Eigen::Vector3d pos3(3, 0, 0);
+    a.setPosition(0, pos1);
+    b.setPosition(0, pos2);
+    c.setPosition(0, pos3);
+    std::pair<int64_t, int64_t> interval1(0, 1);
+    std::pair<int64_t, int64_t> interval2(1, 2);
+    std::pair<int64_t, int64_t> interval3(2, 3);
+    a.setScanInterval(0, interval1);
+    b.setScanInterval(0, interval2);
+    c.setScanInterval(0, interval3);
+    TS_ASSERT_THROWS_NOTHING(a.merge(b));
+    TS_ASSERT_THROWS_NOTHING(a.merge(c));
+    TS_ASSERT(a.isScanning());
+    TS_ASSERT(!a.isEquivalent(b));
+    TS_ASSERT(!a.isEquivalent(c));
+    TS_ASSERT_EQUALS(a.size(), 2);
+    TS_ASSERT_EQUALS(a.scanCount(0), 3);
+    TS_ASSERT_EQUALS(a.scanInterval({0, 0}), interval1);
+    TS_ASSERT_EQUALS(a.scanInterval({0, 1}), interval2);
+    TS_ASSERT_EQUALS(a.scanInterval({0, 2}), interval3);
+    TS_ASSERT_EQUALS(a.position({0, 0}), pos1);
+    TS_ASSERT_EQUALS(a.position({0, 1}), pos2);
+    TS_ASSERT_EQUALS(a.position({0, 2}), pos3);
+    // Monitor is not scanning
+    TS_ASSERT_EQUALS(a.scanCount(1), 1);
+  }
+
+  void test_merge_multiple_associative() {
+    // Test that (A + B) + C == A + (B + C)
+    // This is implied by the ordering guaranteed by merge().
+    DetectorInfo a1(PosVec(1), RotVec(1));
+    a1.setRotation(0, Eigen::Quaterniond::Identity());
+    auto b(a1);
+    auto c(a1);
+    Eigen::Vector3d pos1(1, 0, 0);
+    Eigen::Vector3d pos2(2, 0, 0);
+    Eigen::Vector3d pos3(3, 0, 0);
+    a1.setPosition(0, pos1);
+    b.setPosition(0, pos2);
+    c.setPosition(0, pos3);
+    std::pair<int64_t, int64_t> interval1(0, 1);
+    std::pair<int64_t, int64_t> interval2(1, 2);
+    std::pair<int64_t, int64_t> interval3(2, 3);
+    a1.setScanInterval(0, interval1);
+    b.setScanInterval(0, interval2);
+    c.setScanInterval(0, interval3);
+    auto a2(a1);
+    TS_ASSERT_THROWS_NOTHING(a1.merge(b));
+    TS_ASSERT_THROWS_NOTHING(a1.merge(c));
+    TS_ASSERT_THROWS_NOTHING(b.merge(c));
+    TS_ASSERT_THROWS_NOTHING(a2.merge(b));
+    TS_ASSERT(a1.isEquivalent(a2));
+  }
 };
 
 #endif /* MANTID_BEAMLINE_DETECTORINFOTEST_H_ */
diff --git a/Framework/Kernel/inc/MantidKernel/TimeSeriesProperty.h b/Framework/Kernel/inc/MantidKernel/TimeSeriesProperty.h
index 4fe2e160f35718c4ac522d1c4a40c5f0152e8160..bd21cbed6f26dcd8414a3bc274d975f527e091a3 100644
--- a/Framework/Kernel/inc/MantidKernel/TimeSeriesProperty.h
+++ b/Framework/Kernel/inc/MantidKernel/TimeSeriesProperty.h
@@ -157,6 +157,12 @@ public:
   void splitByTime(std::vector<SplittingInterval> &splitter,
                    std::vector<Property *> outputs,
                    bool isPeriodic) const override;
+
+  /// Split this property by a vector of split times into the given target outputs
+  void splitByTimeVector(std::vector<DateAndTime> &splitter_time_vec,
+                         std::vector<int> &target_vec,
+                         std::vector<TimeSeriesProperty *> outputs);
+
   /// Fill a TimeSplitterType that will filter the events by matching
   void makeFilterByValue(std::vector<SplittingInterval> &split, double min,
                          double max, double TimeTolerance = 0.0,
diff --git a/Framework/Kernel/src/TimeSeriesProperty.cpp b/Framework/Kernel/src/TimeSeriesProperty.cpp
index 95c5a3b7573ad3dd9b93be803285264880a4317b..ae861726ee8db495486864551404041b22f69767 100644
--- a/Framework/Kernel/src/TimeSeriesProperty.cpp
+++ b/Framework/Kernel/src/TimeSeriesProperty.cpp
@@ -521,6 +521,152 @@ void TimeSeriesProperty<TYPE>::splitByTime(
   }
 }
 
+/// Split this TimeSeriesProperty by a vector of time with N entries,
+/// and by the target workspace index defined by target_vec
+/// Requirements:
+/// 1. vector outputs must be defined before this method is called;
+template <typename TYPE>
+void TimeSeriesProperty<TYPE>::splitByTimeVector(
+    std::vector<DateAndTime> &splitter_time_vec, std::vector<int> &target_vec,
+    std::vector<TimeSeriesProperty *> outputs) {
+  // check inputs
+  if (splitter_time_vec.size() != target_vec.size() + 1)
+    throw std::runtime_error("Input time vector's size does not match target "
+                             "workspace index vector's size.");
+  // return if the output vector TimeSeriesProperties is not defined
+  if (outputs.empty())
+    return;
+
+  // sort if necessary
+  sortIfNecessary();
+
+  // work on m_values, m_size, and m_time
+  std::vector<Kernel::DateAndTime> tsp_time_vec = this->timesAsVector();
+
+  // go over both filter time vector and time series property time vector
+  size_t index_splitter = 0;
+  size_t index_tsp_time = 0;
+
+  DateAndTime tsp_time = tsp_time_vec[index_tsp_time];
+  DateAndTime split_start_time = splitter_time_vec[index_splitter];
+  DateAndTime split_stop_time = splitter_time_vec[index_splitter + 1];
+
+  // move splitter index such that the first entry of TSP is before the stop
+  // time of a splitter
+  bool continue_search = true;
+  bool no_entry_in_range = false;
+
+  std::vector<DateAndTime>::iterator splitter_iter;
+  splitter_iter = std::lower_bound(splitter_time_vec.begin(),
+                                   splitter_time_vec.end(), tsp_time);
+  if (splitter_iter == splitter_time_vec.begin()) {
+    // do nothing as the first TimeSeriesProperty entry's time is before any
+    // splitters
+    ;
+  } else if (splitter_iter == splitter_time_vec.end()) {
+    // already search to the last splitter which is still earlier than first TSP
+    // entry
+    no_entry_in_range = true;
+  } else {
+    // calculate the splitter's index (now we check the stop time)
+    index_splitter = splitter_iter - splitter_time_vec.begin() - 1;
+    split_start_time = splitter_time_vec[index_splitter];
+    split_stop_time = splitter_time_vec[index_splitter + 1];
+  }
+
+  g_log.debug() << "TSP entry: " << index_tsp_time
+                << ", Splitter index = " << index_splitter << "\n";
+
+  // move along the entries to find the entry inside the current splitter
+  if (!no_entry_in_range) {
+    std::vector<DateAndTime>::iterator tsp_time_iter;
+    tsp_time_iter = std::lower_bound(tsp_time_vec.begin(), tsp_time_vec.end(),
+                                     split_start_time);
+    if (tsp_time_iter == tsp_time_vec.end()) {
+      // the first splitter's start time is LATER than the last TSP entry, then
+      // there won't be any
+      // TSP entry to be split into any target splitter.
+      no_entry_in_range = true;
+    } else {
+      // first splitter start time is between tsp_time_iter and the one before
+      // it.
+      // so the index for tsp_time_iter is the first TSP entry in the splitter
+      index_tsp_time = tsp_time_iter - tsp_time_vec.begin();
+      tsp_time = *tsp_time_iter; // i.e. tsp_time_vec[index_tsp_time]
+    }
+  }
+
+  g_log.debug() << "TSP entry: " << index_tsp_time
+                << ", Splitter index = " << index_splitter << "\n";
+
+  // now it is time to put the TSP's entries into the corresponding outputs
+  continue_search = !no_entry_in_range;
+  while (continue_search) {
+    // get the first entry index
+    if (index_tsp_time > 0)
+      --index_tsp_time;
+
+    int target = target_vec[index_splitter];
+
+    g_log.debug() << "Target = " << target
+                  << " with splitter index = " << index_splitter << "\n"
+                  << "\t"
+                  << "Time index = " << index_tsp_time << "\n\n";
+
+    bool continue_add = true;
+    while (continue_add) {
+      // add current entry
+      g_log.debug() << "Add entry " << index_tsp_time << " to target " << target
+                    << "\n";
+      if (outputs[target]->size() == 0 ||
+          outputs[target]->lastTime() < tsp_time) {
+        // avoid to add duplicate entry
+        outputs[target]->addValue(m_values[index_tsp_time].time(),
+                                  m_values[index_tsp_time].value());
+      }
+
+      // advance to next entry
+      ++index_tsp_time;
+
+      if (index_tsp_time < tsp_time_vec.size()) // guard: index may be one past the last entry here
+        g_log.debug() << "\tEntry time " << tsp_time_vec[index_tsp_time] << ", stop time " << split_stop_time << "\n";
+
+      if (index_tsp_time == tsp_time_vec.size()) {
+        // last entry. quit all loops
+        continue_add = false;
+        continue_search = false;
+      } else if (tsp_time_vec[index_tsp_time] > split_stop_time) {
+        // next entry is out of this splitter: add the next one and quit
+        if (outputs[target]->lastTime() < m_values[index_tsp_time].time()) {
+          // avoid the duplicate cases that occurred in the fast-frequency issue
+          outputs[target]->addValue(m_values[index_tsp_time].time(),
+                                    m_values[index_tsp_time].value());
+        }
+        // FIXME - in future, need to find out WHETHER there is way to skip the
+        // rest without going through the whole sequence
+        continue_add = false;
+        // reset time entry as the next splitter will add
+        // --index_tsp_time;
+      } else {
+        // advance to next time
+        tsp_time = tsp_time_vec[index_tsp_time];
+      }
+    } // END-WHILE continue add
+
+    // make splitters to advance to next
+    ++index_splitter;
+    if (index_splitter == splitter_time_vec.size() - 1) {
+      // already last splitters
+      continue_search = false;
+    } else {
+      split_start_time = split_stop_time;
+      split_stop_time = splitter_time_vec[index_splitter + 1];
+    }
+  } // END-OF-WHILE
+
+  return;
+}
+
 // The makeFilterByValue & expandFilterToRange methods generate a bunch of
 // warnings when the template type is the wider integer types
 // (when it's being assigned back to a double such as in a call to minValue or
diff --git a/Framework/Kernel/test/TimeSeriesPropertyTest.h b/Framework/Kernel/test/TimeSeriesPropertyTest.h
index 41036cfa2cf8a88ea4856f78398570929b36efa4..265c714aedbeeeb4842f1be241378310ab992f3d 100644
--- a/Framework/Kernel/test/TimeSeriesPropertyTest.h
+++ b/Framework/Kernel/test/TimeSeriesPropertyTest.h
@@ -711,6 +711,259 @@ public:
     delete outputs[0];
   }
 
+  //----------------------------------------------------------------------------
+  /**
+   * output 0 has entries: 3
+   * output 1 has entries: 5
+   * output 2 has entries: 2
+   * output 3 has entries: 7
+   * @brief test_splitByTimeVector
+   */
+  void test_splitByTimeVector() {
+    // create the splitters
+    std::vector<DateAndTime> split_time_vec;
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:17:10"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:17:40"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:17:55"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:17:56"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:18:09"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:18:45"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:22:50"));
+
+    std::vector<int> split_target_vec;
+    split_target_vec.push_back(1);
+    split_target_vec.push_back(0);
+    split_target_vec.push_back(2);
+    split_target_vec.push_back(0);
+    split_target_vec.push_back(1);
+    split_target_vec.push_back(3);
+
+    TimeSeriesProperty<int> log("test log");
+    log.addValue(DateAndTime("2007-11-30T16:17:00"), 1);
+    log.addValue(DateAndTime("2007-11-30T16:17:30"), 2);
+    log.addValue(DateAndTime("2007-11-30T16:18:00"), 3);
+    log.addValue(DateAndTime("2007-11-30T16:18:30"), 4);
+    log.addValue(DateAndTime("2007-11-30T16:19:00"), 5);
+    log.addValue(DateAndTime("2007-11-30T16:19:30"), 6);
+    log.addValue(DateAndTime("2007-11-30T16:20:00"), 7);
+    log.addValue(DateAndTime("2007-11-30T16:20:30"), 8);
+    log.addValue(DateAndTime("2007-11-30T16:21:00"), 9);
+    log.addValue(DateAndTime("2007-11-30T16:21:30"), 10);
+
+    std::vector<TimeSeriesProperty<int> *> outputs;
+    for (int itarget = 0; itarget < 4; ++itarget) {
+      TimeSeriesProperty<int> *tsp = new TimeSeriesProperty<int>("target");
+      outputs.push_back(tsp);
+    }
+
+    log.splitByTimeVector(split_time_vec, split_target_vec, outputs);
+
+    // Exam the split entries
+    TimeSeriesProperty<int> *out_0 = outputs[0];
+    // FIXME - Check whether out_0 is correct!
+    TS_ASSERT_EQUALS(out_0->size(), 3);
+    TS_ASSERT_EQUALS(out_0->nthValue(0), 2);
+    TS_ASSERT_EQUALS(out_0->nthValue(1), 3);
+    TS_ASSERT_EQUALS(out_0->nthValue(2), 4);
+
+    TimeSeriesProperty<int> *out_1 = outputs[1];
+    TS_ASSERT_EQUALS(out_1->size(), 5);
+    TS_ASSERT_EQUALS(out_1->nthValue(0), 1);
+    TS_ASSERT_EQUALS(out_1->nthValue(1), 2);
+    TS_ASSERT_EQUALS(out_1->nthValue(2), 3);
+    TS_ASSERT_EQUALS(out_1->nthValue(3), 4);
+    TS_ASSERT_EQUALS(out_1->nthValue(4), 5);
+
+    TimeSeriesProperty<int> *out_2 = outputs[2];
+    TS_ASSERT_EQUALS(out_2->size(), 2);
+    TS_ASSERT_EQUALS(out_2->nthValue(0), 2);
+    TS_ASSERT_EQUALS(out_2->nthValue(1), 3);
+
+    TimeSeriesProperty<int> *out_3 = outputs[3];
+    TS_ASSERT_EQUALS(out_3->size(), 7);
+    // out[3] should have entries: 4, 5, 6, 7, 8, 9, 10
+    for (int j = 0; j < out_3->size(); ++j) {
+      TS_ASSERT_EQUALS(out_3->nthValue(j), j + 4);
+    }
+
+    return;
+  }
+
+  //----------------------------------------------------------------------------
+  /** last splitter is before first entry
+   * @brief test_splitByTimeVectorEarlySplitter
+   */
+  void test_splitByTimeVectorEarlySplitter() {
+    // create the splitters
+    std::vector<DateAndTime> split_time_vec;
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:00:10"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:00:40"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:07:55"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:07:56"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:08:09"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:08:45"));
+    split_time_vec.push_back(DateAndTime("2007-11-30T16:12:50"));
+
+    std::vector<int> split_target_vec;
+    split_target_vec.push_back(1);
+    split_target_vec.push_back(0);
+    split_target_vec.push_back(2);
+    split_target_vec.push_back(0);
+    split_target_vec.push_back(1);
+    split_target_vec.push_back(3);
+
+    TimeSeriesProperty<int> log("test log");
+    log.addValue(DateAndTime("2007-11-30T16:17:00"), 1);
+    log.addValue(DateAndTime("2007-11-30T16:17:30"), 2);
+    log.addValue(DateAndTime("2007-11-30T16:18:00"), 3);
+    log.addValue(DateAndTime("2007-11-30T16:18:30"), 4);
+    log.addValue(DateAndTime("2007-11-30T16:19:00"), 5);
+    log.addValue(DateAndTime("2007-11-30T16:19:30"), 6);
+    log.addValue(DateAndTime("2007-11-30T16:20:00"), 7);
+    log.addValue(DateAndTime("2007-11-30T16:20:30"), 8);
+    log.addValue(DateAndTime("2007-11-30T16:21:00"), 9);
+    log.addValue(DateAndTime("2007-11-30T16:21:30"), 10);
+
+    // Initialize the 4 output TimeSeriesProperty targets
+    std::vector<TimeSeriesProperty<int> *> outputs;
+    for (int itarget = 0; itarget < 4; ++itarget) {
+      TimeSeriesProperty<int> *tsp = new TimeSeriesProperty<int>("target");
+      outputs.push_back(tsp);
+    }
+
+    log.splitByTimeVector(split_time_vec, split_target_vec, outputs);
+
+    // check
+    for (int i = 0; i < 4; ++i) {
+      TimeSeriesProperty<int> *out_i = outputs[i];
+      TS_ASSERT_EQUALS(out_i->size(), 0);
+    }
+
+    return;
+  }
+
+  //----------------------------------------------------------------------------
+  /** first splitter is after last entry
+   * @brief test_splitByTimeVectorLaterSplitter
+   */
+  void test_splitByTimeVectorLaterSplitter() {
+    // create the splitters
+    std::vector<DateAndTime> split_time_vec;
+    split_time_vec.push_back(DateAndTime("2007-12-30T16:00:10"));
+    split_time_vec.push_back(DateAndTime("2007-12-30T16:00:40"));
+    split_time_vec.push_back(DateAndTime("2007-12-30T16:07:55"));
+    split_time_vec.push_back(DateAndTime("2007-12-30T16:07:56"));
+    split_time_vec.push_back(DateAndTime("2007-12-30T16:08:09"));
+    split_time_vec.push_back(DateAndTime("2007-12-30T16:08:45"));
+    split_time_vec.push_back(DateAndTime("2007-12-30T16:12:50"));
+
+    std::vector<int> split_target_vec;
+    split_target_vec.push_back(1);
+    split_target_vec.push_back(0);
+    split_target_vec.push_back(2);
+    split_target_vec.push_back(0);
+    split_target_vec.push_back(1);
+    split_target_vec.push_back(3);
+
+    // create test log
+    TimeSeriesProperty<int> log("test log");
+    log.addValue(DateAndTime("2007-11-30T16:17:00"), 1);
+    log.addValue(DateAndTime("2007-11-30T16:17:30"), 2);
+    log.addValue(DateAndTime("2007-11-30T16:18:00"), 3);
+    log.addValue(DateAndTime("2007-11-30T16:18:30"), 4);
+    log.addValue(DateAndTime("2007-11-30T16:19:00"), 5);
+    log.addValue(DateAndTime("2007-11-30T16:19:30"), 6);
+    log.addValue(DateAndTime("2007-11-30T16:20:00"), 7);
+    log.addValue(DateAndTime("2007-11-30T16:20:30"), 8);
+    log.addValue(DateAndTime("2007-11-30T16:21:00"), 9);
+    log.addValue(DateAndTime("2007-11-30T16:21:30"), 10);
+
+    // Initialize the 4 output TimeSeriesProperty targets
+    std::vector<TimeSeriesProperty<int> *> outputs;
+    for (int itarget = 0; itarget < 4; ++itarget) {
+      TimeSeriesProperty<int> *tsp = new TimeSeriesProperty<int>("target");
+      outputs.push_back(tsp);
+    }
+
+    log.splitByTimeVector(split_time_vec, split_target_vec, outputs);
+
+    // check
+    for (int i = 0; i < 4; ++i) {
+      TimeSeriesProperty<int> *out_i = outputs[i];
+      TS_ASSERT_EQUALS(out_i->size(), 0);
+    }
+  }
+
+  //----------------------------------------------------------------------------
+  /** high-frequency splitters splits a slow change log
+   * @brief test_splitByTimeVectorFastLogSplitter
+   */
+  void test_splitByTimeVectorFastLogSplitter() {
+    // create test log
+    TimeSeriesProperty<int> log("test log");
+    log.addValue(DateAndTime("2007-11-30T16:17:00"), 1);
+    log.addValue(DateAndTime("2007-11-30T16:17:30"), 2);
+    log.addValue(DateAndTime("2007-11-30T16:18:00"), 3);
+    log.addValue(DateAndTime("2007-11-30T16:18:30"), 4);
+    log.addValue(DateAndTime("2007-11-30T16:19:00"), 5);
+    log.addValue(DateAndTime("2007-11-30T16:19:30"), 6);
+    log.addValue(DateAndTime("2007-11-30T16:20:00"), 7);
+    log.addValue(DateAndTime("2007-11-30T16:20:30"), 8);
+    log.addValue(DateAndTime("2007-11-30T16:21:00"), 9);
+    log.addValue(DateAndTime("2007-11-30T16:21:30"), 10);
+
+    // create a high frequency splitter
+    DateAndTime split_time("2007-11-30T16:17:00");
+    int64_t dt = 100 * 1000;
+
+    std::vector<DateAndTime> vec_split_times;
+    std::vector<int> vec_split_target;
+
+    for (int i = 0; i < 10; ++i) {
+      for (int j = 0; j < 10; ++j) {
+        vec_split_times.push_back(split_time);
+        split_time += dt;
+        vec_split_target.push_back(j);
+      }
+    }
+
+    // push back last split-time (split stop)
+    vec_split_times.push_back(split_time);
+
+    // Initialize the 10 output TimeSeriesProperty targets
+    std::vector<TimeSeriesProperty<int> *> outputs;
+    for (int itarget = 0; itarget < 10; ++itarget) {
+      TimeSeriesProperty<int> *tsp = new TimeSeriesProperty<int>("target");
+      outputs.push_back(tsp);
+    }
+
+    /*
+    size_t num_splits = vec_split_target.size();
+    for (size_t i = 0; i < num_splits; ++i) {
+      std::cout << "s[" << i << "]  start = " << vec_split_times[i]
+                << ", stop = " << vec_split_times[i + 1]
+                << ":  target = " << vec_split_target[i] << "\n";
+    }
+    */
+
+    // split time series property
+    log.splitByTimeVector(vec_split_times, vec_split_target, outputs);
+
+    // TODO/FIXME/ - continue to debug from here!
+    /*
+    TimeSeriesProperty<int> *out0 = outputs[0];
+    for (int i = 0; i < out0->size(); ++i) {
+      std::cout << i << "-th: " << out0->nthTime(i) << ", " << out0->nthValue(i)
+                << "\n";
+    }
+    */
+
+    // test
+    for (size_t i = 0; i < 10; ++i) {
+      TS_ASSERT_EQUALS(outputs[i]->size(), 2);
+    }
+  }
+
   //----------------------------------------------------------------------------
   void test_statistics() {
     TimeSeriesProperty<double> *log =
diff --git a/Framework/PythonInterface/mantid/api/src/Exports/DetectorInfo.cpp b/Framework/PythonInterface/mantid/api/src/Exports/DetectorInfo.cpp
index b81f37ce4c34ee781b16f026413fee81143eab4c..e7e1bb7bf6003875051777b3c3d61a630a6d6d57 100644
--- a/Framework/PythonInterface/mantid/api/src/Exports/DetectorInfo.cpp
+++ b/Framework/PythonInterface/mantid/api/src/Exports/DetectorInfo.cpp
@@ -9,6 +9,9 @@ void export_DetectorInfo() {
   // its functionality to Python, and should not yet be used in user scripts. DO
   // NOT ADD EXPORTS TO OTHER METHODS without contacting the team working on
   // Instrument-2.0.
+  bool (DetectorInfo::*isMonitor)(const size_t) const =
+      &DetectorInfo::isMonitor;
+  bool (DetectorInfo::*isMasked)(const size_t) const = &DetectorInfo::isMasked;
   class_<DetectorInfo, boost::noncopyable>("DetectorInfo", no_init)
       .def("__len__", &DetectorInfo::size, (arg("self")),
            "Returns the size of the DetectorInfo, i.e., the number of "
@@ -16,8 +19,8 @@ void export_DetectorInfo() {
       .def("size", &DetectorInfo::size, (arg("self")),
            "Returns the size of the DetectorInfo, i.e., the number of "
            "detectors in the instrument.")
-      .def("isMonitor", &DetectorInfo::isMonitor, (arg("self"), arg("index")),
+      .def("isMonitor", isMonitor, (arg("self"), arg("index")),
            "Returns True if the detector is a monitor.")
-      .def("isMasked", &DetectorInfo::isMasked, (arg("self"), arg("index")),
+      .def("isMasked", isMasked, (arg("self"), arg("index")),
            "Returns True if the detector is masked.");
 }
diff --git a/Framework/PythonInterface/plugins/algorithms/FindEPP.py b/Framework/PythonInterface/plugins/algorithms/FindEPP.py
index e2f5613bf5da3e553f542c47f48cc03f90a71060..98af7158589a6bd230763173d950fda47e375271 100644
--- a/Framework/PythonInterface/plugins/algorithms/FindEPP.py
+++ b/Framework/PythonInterface/plugins/algorithms/FindEPP.py
@@ -2,7 +2,7 @@
 from __future__ import (absolute_import, division, print_function)
 from mantid.api import PythonAlgorithm, AlgorithmFactory, MatrixWorkspaceProperty, ITableWorkspaceProperty
 from mantid.kernel import Direction
-from mantid.simpleapi import Fit, CreateEmptyTableWorkspace
+from mantid.simpleapi import CreateEmptyTableWorkspace, DeleteWorkspace, Fit
 import numpy as np
 
 
@@ -92,11 +92,11 @@ class FindEPP(PythonAlgorithm):
         startX = tryCentre - 3.0*fwhm
         endX   = tryCentre + 3.0*fwhm
 
+        tempOutputPrefix = "__EPPfit_" + str(self.workspace) + "_" + str(index)
         # pylint: disable=assignment-from-none
         # result = fitStatus, chiSq, covarianceTable, paramTable
         result = Fit(InputWorkspace=self.workspace, WorkspaceIndex=index, StartX = startX, EndX=endX,
-                     Output='EPPfit', Function=fitFun, CreateOutput=True, OutputParametersOnly=True)
-
+                     Output=tempOutputPrefix, Function=fitFun, CreateOutput=True, OutputParametersOnly=True)
         return result
 
     def PyExec(self):
@@ -129,10 +129,9 @@ class FindEPP(PythonAlgorithm):
                     name = row["Name"]
                     nextrow[name] = row["Value"]
                     nextrow[name+"Error"] = row["Error"]
-
-            # self.log().debug("Next row= " + str(nextrow))
+                DeleteWorkspace(result.OutputParameters)
+                DeleteWorkspace(result.OutputNormalisedCovarianceMatrix)
             outws.addRow(nextrow)
-
         self.setProperty("OutputWorkspace", outws)
         return
 
diff --git a/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py b/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py
index c164fc407fff681b79dbbba851c3868f0b4ea735..158b3cf90d0eb86c8eb9de38f87f5d3a31ba63f7 100644
--- a/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py
+++ b/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py
@@ -122,21 +122,27 @@ class MatchPeaks(PythonAlgorithm):
 
         if input3:
             if not input2:
-                issues['InputWorkspace2'] = 'When input 3 is given, input 2 is also required.'
+                issues['InputWorkspace2'] = 'When InputWorkspace3 is given, InputWorkspace2 is also required.'
             else:
-                if mtd[input3].blocksize() != mtd[input2].blocksize():
-                    issues['InputWorkspace3'] = 'Incompatible same number of bins'
-                if mtd[input3].getNumberHistograms() != mtd[input2].getNumberHistograms():
+                if mtd[input3].isDistribution() != mtd[input2].isDistribution():
+                    issues['InputWorkspace3'] = 'InputWorkspace2 and InputWorkspace3 must be either point data or ' \
+                                                'histogram data'
+                elif mtd[input3].blocksize() != mtd[input2].blocksize():
+                    issues['InputWorkspace3'] = 'Incompatible number of bins'
+                elif mtd[input3].getNumberHistograms() != mtd[input2].getNumberHistograms():
                     issues['InputWorkspace3'] = 'Incompatible number of spectra'
-                if np.all(mtd[input3].extractX() - mtd[input2].extractX()):
+                elif np.any(mtd[input3].extractX() - mtd[input2].extractX()):
                     issues['InputWorkspace3'] = 'Incompatible x-values'
 
         if input2:
-            if mtd[input1].blocksize() != mtd[input2].blocksize():
-                issues['InputWorkspace2'] = 'Incompatible same number of bins'
-            if mtd[input1].getNumberHistograms() != mtd[input2].getNumberHistograms():
+            if mtd[input1].isDistribution() != mtd[input2].isDistribution():
+                issues['InputWorkspace2'] = 'InputWorkspace1 and InputWorkspace2 must be either point data or ' \
+                                            'histogram data'
+            elif mtd[input1].blocksize() != mtd[input2].blocksize():
+                issues['InputWorkspace2'] = 'Incompatible number of bins'
+            elif mtd[input1].getNumberHistograms() != mtd[input2].getNumberHistograms():
                 issues['InputWorkspace2'] = 'Incompatible number of spectra'
-            if np.all(mtd[input1].extractX() - mtd[input2].extractX()):
+            elif np.any(mtd[input1].extractX() - mtd[input2].extractX()):
                 issues['InputWorkspace2'] = 'Incompatible x-values'
 
         return issues
@@ -273,14 +279,6 @@ class MatchPeaks(PythonAlgorithm):
 
         # Clean-up unused TableWorkspaces in try-catch
         # Direct deletion causes problems when running in parallel for too many workspaces
-        try:
-            DeleteWorkspace('EPPfit_Parameters')
-        except ValueError:
-            logger.debug('Fit parameters workspace already deleted')
-        try:
-            DeleteWorkspace('EPPfit_NormalisedCovarianceMatrix')
-        except ValueError:
-            logger.debug('Fit covariance matrix already deleted')
         try:
             DeleteWorkspace(fit_table)
         except ValueError:
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSLoad.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSLoad.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddbc4fafa63d2324ed956f864ebd828164745a8a
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSLoad.py
@@ -0,0 +1,369 @@
+# pylint: disable=invalid-name
+
+""" SANSLoad algorithm which handles loading SANS files"""
+
+from mantid.kernel import (Direction, PropertyManagerProperty, FloatArrayProperty,
+                           EnabledWhenProperty, PropertyCriterion)
+from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode, Progress,
+                        WorkspaceProperty)
+
+from sans.state.state_base import create_deserialized_sans_state_from_property_manager
+from sans.common.enums import SANSDataType
+from sans.common.general_functions import create_unmanaged_algorithm
+from sans.algorithm_detail.load_data import SANSLoadDataFactory
+
+
+class SANSLoad(DataProcessorAlgorithm):
+    def category(self):
+        return 'SANS\\Load'
+
+    def summary(self):
+        return 'Load SANS data'
+
+    def PyInit(self):
+        # ----------
+        # INPUT
+        # ----------
+        self.declareProperty(PropertyManagerProperty('SANSState'),
+                             doc='A property manager which fulfills the SANSState contract.')
+
+        self.declareProperty("PublishToCache", True, direction=Direction.Input,
+                             doc="Publish the calibration workspace to a cache, in order to avoid reloading "
+                                 "for subsequent runs.")
+
+        self.declareProperty("UseCached", True, direction=Direction.Input,
+                             doc="Checks if there are loaded files available. If they are, those files are used.")
+
+        self.declareProperty("MoveWorkspace", defaultValue=False, direction=Direction.Input,
+                             doc="Move the workspace according to the SANSState setting. This might be useful"
+                             "for manual inspection.")
+
+        # Beam coordinates if an initial move of the workspace is requested
+        enabled_condition = EnabledWhenProperty("MoveWorkspace", PropertyCriterion.IsNotDefault)
+        self.declareProperty(FloatArrayProperty(name='BeamCoordinates', values=[]),
+                             doc='The coordinates which is used to position the instrument component(s). '
+                                 'If the workspaces should be loaded with an initial move, then this '
+                                 'needs to be specified.')
+        # Components which are to be moved
+        self.declareProperty('Component', '', direction=Direction.Input,
+                             doc='Component that should be moved. '
+                                 'If the workspaces should be loaded with an initial move, then this '
+                                 'needs to be specified.')
+        self.setPropertySettings("BeamCoordinates", enabled_condition)
+        self.setPropertySettings("Component", enabled_condition)
+
+        # ------------
+        #  OUTPUT
+        # ------------
+        default_number_of_workspaces = 0
+
+        # Sample Scatter Workspaces
+        self.declareProperty(WorkspaceProperty('SampleScatterWorkspace', '',
+                                               optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The sample scatter workspace. This workspace does not contain monitors.')
+        self.declareProperty(WorkspaceProperty('SampleScatterMonitorWorkspace', '',
+                                               optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The sample scatter monitor workspace. This workspace only contains monitors.')
+        self.declareProperty(MatrixWorkspaceProperty('SampleTransmissionWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The sample transmission workspace.')
+        self.declareProperty(MatrixWorkspaceProperty('SampleDirectWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The sample scatter direct workspace.')
+
+        self.setPropertyGroup("SampleScatterWorkspace", 'Sample')
+        self.setPropertyGroup("SampleScatterMonitorWorkspace", 'Sample')
+        self.setPropertyGroup("SampleTransmissionWorkspace", 'Sample')
+        self.setPropertyGroup("SampleDirectWorkspace", 'Sample')
+
+        # Number of sample workspaces
+        self.declareProperty('NumberOfSampleScatterWorkspaces', defaultValue=default_number_of_workspaces,
+                             direction=Direction.Output,
+                             doc='The number of workspace for sample scatter.')
+        self.declareProperty('NumberOfSampleTransmissionWorkspaces', defaultValue=default_number_of_workspaces,
+                             direction=Direction.Output,
+                             doc='The number of workspace for sample transmission.')
+        self.declareProperty('NumberOfSampleDirectWorkspaces', defaultValue=default_number_of_workspaces,
+                             direction=Direction.Output,
+                             doc='The number of workspace for sample direct.')
+
+        self.declareProperty(MatrixWorkspaceProperty('CanScatterWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can scatter workspace. This workspace does not contain monitors.')
+        self.declareProperty(MatrixWorkspaceProperty('CanScatterMonitorWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can scatter monitor workspace. This workspace only contains monitors.')
+        self.declareProperty(MatrixWorkspaceProperty('CanTransmissionWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can transmission workspace.')
+        self.declareProperty(MatrixWorkspaceProperty('CanDirectWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The sample scatter direct workspace.')
+        self.setPropertyGroup("CanScatterWorkspace", 'Can')
+        self.setPropertyGroup("CanScatterMonitorWorkspace", 'Can')
+        self.setPropertyGroup("CanTransmissionWorkspace", 'Can')
+        self.setPropertyGroup("CanDirectWorkspace", 'Can')
+
+        self.declareProperty('NumberOfCanScatterWorkspaces', defaultValue=default_number_of_workspaces,
+                             direction=Direction.Output,
+                             doc='The number of workspace for can scatter.')
+        self.declareProperty('NumberOfCanTransmissionWorkspaces', defaultValue=default_number_of_workspaces,
+                             direction=Direction.Output,
+                             doc='The number of workspace for can transmission.')
+        self.declareProperty('NumberOfCanDirectWorkspaces', defaultValue=default_number_of_workspaces,
+                             direction=Direction.Output,
+                             doc='The number of workspace for can direct.')
+
+    def PyExec(self):
+        # Read the state
+        state_property_manager = self.getProperty("SANSState").value
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+
+        # Run the appropriate SANSLoader and get the workspaces and the workspace monitors
+        # Note that cache optimization is only applied to the calibration workspace since it is not available as a
+        # return property and it is also something which is most likely not to change between different reductions.
+        use_cached = self.getProperty("UseCached").value
+        publish_to_ads = self.getProperty("PublishToCache").value
+
+        data = state.data
+        progress = self._get_progress_for_file_loading(data)
+
+        # Get the correct SANSLoader from the SANSLoaderFactory
+        load_factory = SANSLoadDataFactory()
+        loader = load_factory.create_loader(state)
+
+        workspaces, workspace_monitors = loader.execute(data_info=data, use_cached=use_cached,
+                                                        publish_to_ads=publish_to_ads, progress=progress)
+        progress.report("Loaded the data.")
+
+        # Check if a move has been requested and perform it. This can be useful if scientists want to load the data and
+        # have it moved in order to inspect it with other tools
+        move_workspaces = self.getProperty("MoveWorkspace").value
+        if move_workspaces:
+            progress_move = Progress(self, start=0.8, end=1.0, nreports=2)
+            progress_move.report("Starting to move the workspaces.")
+            self._perform_initial_move(workspaces, state)
+            progress_move.report("Finished moving the workspaces.")
+
+        # Set output workspaces
+        for workspace_type, workspace in list(workspaces.items()):
+            self.set_output_for_workspaces(workspace_type, workspace)
+
+        # Set the output monitor workspaces
+        for workspace_type, workspace in list(workspace_monitors.items()):
+            self.set_output_for_monitor_workspaces(workspace_type, workspace)
+
+    def validateInputs(self):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        state_property_manager = self.getProperty("SANSState").value
+        try:
+            state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+            state.property_manager = state_property_manager
+            state.validate()
+        except ValueError as err:
+            errors.update({"SANSState": str(err)})
+
+        # We need to validate that the for each expected output workspace of the SANSState a output workspace name
+        # was supplied in the PyInit
+        # For sample scatter
+        sample_scatter = self.getProperty("SampleScatterWorkspace").value
+        sample_scatter_as_string = self.getProperty("SampleScatterWorkspace").valueAsStr
+        if sample_scatter is None and not sample_scatter_as_string:
+            errors.update({"SampleScatterWorkspace": "A sample scatter output workspace needs to be specified."})
+
+        # For sample scatter monitor
+        sample_scatter_monitor = self.getProperty("SampleScatterMonitorWorkspace").value
+        sample_scatter_monitor_as_string = self.getProperty("SampleScatterMonitorWorkspace").valueAsStr
+        if sample_scatter_monitor is None and not sample_scatter_monitor_as_string:
+            errors.update({"SampleScatterMonitorWorkspace": "A sample scatter output workspace needs to be specified."})
+
+        # ------------------------------------
+        # Check the optional output workspaces
+        # If they are specified in the SANSState, then we require them to be set on the output as well.
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+        data_info = state.data
+
+        # For sample transmission
+        sample_transmission = self.getProperty("SampleTransmissionWorkspace").value
+        sample_transmission_as_string = self.getProperty("SampleTransmissionWorkspace").valueAsStr
+        sample_transmission_was_set = sample_transmission is not None or len(sample_transmission_as_string) > 0
+
+        sample_transmission_from_state = data_info.sample_transmission
+        if not sample_transmission_was_set and sample_transmission_from_state is not None:
+            errors.update({"SampleTransmissionWorkspace": "You need to set the output for the sample transmission"
+                                                          " workspace since it is specified to be loaded in your "
+                                                          "reduction configuration."})
+        if sample_transmission_was_set and sample_transmission_from_state is None:
+            errors.update({"SampleTransmissionWorkspace": "You set an output workspace for sample transmission, "
+                                                          "although none is specified in the reduction configuration."})
+
+        # For sample direct
+        sample_direct = self.getProperty("SampleDirectWorkspace").value
+        sample_direct_as_string = self.getProperty("SampleDirectWorkspace").valueAsStr
+        sample_direct_was_set = sample_direct is not None or len(sample_direct_as_string) > 0
+
+        sample_direct_from_state = data_info.sample_direct
+        if not sample_direct_was_set and sample_direct_from_state is not None:
+            errors.update({"SampleDirectWorkspace": "You need to set the output for the sample direct"
+                                                    " workspace since it is specified to be loaded in your "
+                                                    "reduction configuration."})
+        if sample_direct_was_set and sample_direct_from_state is None:
+            errors.update({"SampleDirectWorkspace": "You set an output workspace for sample direct, "
+                                                    "although none is specified in the reduction configuration."})
+
+        # For can scatter + monitor
+        can_scatter = self.getProperty("CanScatterWorkspace").value
+        can_scatter_as_string = self.getProperty("CanScatterWorkspace").valueAsStr
+        can_scatter_was_set = can_scatter is not None or len(can_scatter_as_string) > 0
+
+        can_scatter_from_state = data_info.can_scatter
+        if not can_scatter_was_set and can_scatter_from_state is not None:
+            errors.update({"CanScatterWorkspace": "You need to set the output for the can scatter"
+                                                  " workspace since it is specified to be loaded in your "
+                                                  "reduction configuration."})
+        if can_scatter_was_set and can_scatter_from_state is None:
+            errors.update({"CanScatterWorkspace": "You set an output workspace for can scatter, "
+                                                  "although none is specified in the reduction configuration."})
+
+        # For can scatter monitor
+        can_scatter_monitor = self.getProperty("CanScatterMonitorWorkspace").value
+        can_scatter_monitor_as_string = self.getProperty("CanScatterMonitorWorkspace").valueAsStr
+        can_scatter_monitor_was_set = can_scatter_monitor is not None or len(can_scatter_monitor_as_string) > 0
+        if not can_scatter_monitor_was_set and can_scatter_from_state is not None:
+            errors.update({"CanScatterMonitorWorkspace": "You need to set the output for the can scatter monitor"
+                                                         " workspace since it is specified to be loaded in your "
+                                                         "reduction configuration."})
+        if can_scatter_monitor_was_set and can_scatter_from_state is None:
+            errors.update({"CanScatterMonitorWorkspace": "You set an output workspace for can scatter monitor, "
+                                                         "although none is specified in the reduction configuration."})
+
+        # For sample transmission
+        can_transmission = self.getProperty("CanTransmissionWorkspace").value
+        can_transmission_as_string = self.getProperty("CanTransmissionWorkspace").valueAsStr
+        can_transmission_was_set = can_transmission is not None or len(can_transmission_as_string) > 0
+        can_transmission_from_state = data_info.can_transmission
+        if not can_transmission_was_set and can_transmission_from_state is not None:
+            errors.update({"CanTransmissionWorkspace": "You need to set the output for the can transmission"
+                                                       " workspace since it is specified to be loaded in your "
+                                                       "reduction configuration."})
+        if can_transmission_was_set and can_transmission_from_state is None:
+            errors.update({"CanTransmissionWorkspace": "You set an output workspace for can transmission, "
+                                                       "although none is specified in the reduction configuration."})
+
+        # For can direct
+        can_direct = self.getProperty("CanDirectWorkspace").value
+        can_direct_as_string = self.getProperty("CanDirectWorkspace").valueAsStr
+        can_direct_was_set = can_direct is not None or len(can_direct_as_string) > 0
+        can_direct_from_state = data_info.can_direct
+        if not can_direct_was_set and can_direct_from_state is not None:
+            errors.update({"CanDirectWorkspace": "You need to set the output for the can direct"
+                                                 " workspace since it is specified to be loaded in your "
+                                                 "reduction configuration."})
+        if can_direct_was_set and can_direct_from_state is None:
+            errors.update({"CanDirectWorkspace": "You set an output workspace for can direct, "
+                                                 "although none is specified in the reduction configuration."})
+        return errors
+
+    def set_output_for_workspaces(self, workspace_type, workspaces):
+        if workspace_type is SANSDataType.SampleScatter:
+            self.set_property_with_number_of_workspaces("SampleScatterWorkspace", workspaces)
+        elif workspace_type is SANSDataType.SampleTransmission:
+            self.set_property_with_number_of_workspaces("SampleTransmissionWorkspace", workspaces)
+        elif workspace_type is SANSDataType.SampleDirect:
+            self.set_property_with_number_of_workspaces("SampleDirectWorkspace", workspaces)
+        elif workspace_type is SANSDataType.CanScatter:
+            self.set_property_with_number_of_workspaces("CanScatterWorkspace", workspaces)
+        elif workspace_type is SANSDataType.CanTransmission:
+            self.set_property_with_number_of_workspaces("CanTransmissionWorkspace", workspaces)
+        elif workspace_type is SANSDataType.CanDirect:
+            self.set_property_with_number_of_workspaces("CanDirectWorkspace", workspaces)
+        else:
+            raise RuntimeError("SANSLoad: Unknown data output workspace format: {0}".format(str(workspace_type)))
+
+    def set_output_for_monitor_workspaces(self, workspace_type, workspaces):
+        if workspace_type is SANSDataType.SampleScatter:
+            self.set_property("SampleScatterMonitorWorkspace", workspaces)
+        elif workspace_type is SANSDataType.CanScatter:
+            self.set_property("CanScatterMonitorWorkspace", workspaces)
+        else:
+            raise RuntimeError("SANSLoad: Unknown data output workspace format: {0}".format(str(workspace_type)))
+
+    def set_property(self, name, workspace_collection):
+        """
+        We receive a name for a property and a collection of workspaces. If the workspace is a group workspace, then
+        we dynamically create output properties and inform the user that he needs to query the output workspaces
+        individually and we need to communicate how many there are.
+        :param name: The name of the output property
+        :param workspace_collection: A list of workspaces which corresponds to the name. Note that normally there
+                                    there will be only one element in this list. Only when dealing with multiperiod
+                                    data can we expected to see more workspaces in the list.
+        """
+        if len(workspace_collection) > 1:
+            # Note that the first output is the same as we have set above.
+            counter = 1
+            for workspace in workspace_collection:
+                output_name = name + "_" + str(counter)
+                self.declareProperty(MatrixWorkspaceProperty(output_name, '',
+                                                             optional=PropertyMode.Optional,
+                                                             direction=Direction.Output),
+                                     doc='A child workspace of a multi-period file.')
+                # We need to set a name on here if one was set
+                user_specified_name = self.getProperty(name).valueAsStr
+                if user_specified_name:
+                    user_specified_name += "_" + str(counter)
+                    self.setProperty(output_name, user_specified_name)
+                self.setProperty(output_name, workspace)
+                counter += 1
+        else:
+            self.setProperty(name, workspace_collection[0])
+        return len(workspace_collection)
+
+    def set_property_with_number_of_workspaces(self, name, workspace_collection):
+        counter = self.set_property(name, workspace_collection)
+        # The property name for the number of workspaces
+        number_of_workspaces_name = "NumberOf" + name + "s"
+        self.setProperty(number_of_workspaces_name, counter)
+
+    def _perform_initial_move(self, workspaces, state):
+        move_name = "SANSMove"
+        state_dict = state.property_manager
+        move_options = {"SANSState": state_dict,
+                        "MoveType": "InitialMove"}
+
+        # If beam centre was specified then use it
+        beam_coordinates = self.getProperty("BeamCoordinates").value
+        if beam_coordinates:
+            move_options.update({"BeamCoordinates": beam_coordinates})
+
+        # If component was specified then use it
+        component = self.getProperty("Component").value
+        if component:
+            move_options.update({"Component": component})
+
+        move_alg = create_unmanaged_algorithm(move_name, **move_options)
+
+        # The workspaces are stored in a dict: workspace_names (sample_scatter, etc) : ListOfWorkspaces
+        for key, workspace_list in list(workspaces.items()):
+            for workspace in workspace_list:
+                move_alg.setProperty("Workspace", workspace)
+                move_alg.execute()
+
+    def _get_progress_for_file_loading(self, data):
+        # Get the number of workspaces which are to be loaded
+        number_of_files_to_load = sum(x is not None for x in [data.sample_scatter, data.sample_transmission,
+                                                              data.sample_direct, data.can_scatter,
+                                                              data.can_transmission, data.can_direct,
+                                                              data.calibration])
+        progress_steps = number_of_files_to_load + 1
+        # Check if there is a move operation to be performed
+        uses_move = self.getProperty("MoveWorkspace").value
+
+        # The partitioning of the progress bar is 80% for loading if there is a move else 100%
+        end = 0.8 if uses_move else 1.0
+        progress = Progress(self, start=0.0, end=end, nreports=progress_steps)
+        return progress
+
+
+# Register algorithm with Mantid
+AlgorithmFactory.subscribe(SANSLoad)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSMove.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSMove.py
new file mode 100644
index 0000000000000000000000000000000000000000..64cfff4006eead07b102d3b81ea5f3d385fec491
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSMove.py
@@ -0,0 +1,189 @@
+# pylint: disable=too-few-public-methods
+
+""" SANSMove algorithm to move a workspace according to the instrument settings."""
+
+from mantid.kernel import (Direction, PropertyManagerProperty, StringListValidator,
+                           FloatArrayProperty)
+from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode, Progress)
+
+from sans.algorithm_detail.move_workspaces import SANSMoveFactory
+from sans.state.state_base import create_deserialized_sans_state_from_property_manager
+from sans.common.enums import DetectorType
+
+
+class MoveType(object):
+    class InitialMove(object):
+        pass
+
+    class ElementaryDisplacement(object):
+        pass
+
+    class SetToZero(object):
+        pass
+
+
+def get_detector_for_component(move_info, component):
+    """
+    Get the detector for the selected component.
+
+    The detector can be either an actual component name or a HAB, LAB abbreviation
+    :param move_info: a SANSStateMove object
+    :param component: the selected component
+    :return: an equivalent detector to teh selected component or None
+    """
+    detectors = move_info.detectors
+    selected_detector = None
+    if component == "HAB":
+        selected_detector = detectors[DetectorType.to_string(DetectorType.HAB)]
+    elif component == "LAB":
+        selected_detector = detectors[DetectorType.to_string(DetectorType.LAB)]
+    else:
+        # Check if the component is part of the detector names
+        for _, detector in list(detectors.items()):
+            if detector.detector_name == component or detector.detector_name_short == component:
+                selected_detector = detector
+    return selected_detector
+
+
+class SANSMove(DataProcessorAlgorithm):
+    def category(self):
+        return 'SANS\\Move'
+
+    def summary(self):
+        return 'Moves SANS workspaces.'
+
+    def _make_move_type_map(self):
+        return {'InitialMove': MoveType.InitialMove,
+                'ElementaryDisplacement': MoveType.ElementaryDisplacement,
+                'SetToZero': MoveType.SetToZero}
+
+    def PyInit(self):
+        # State
+        self.declareProperty(PropertyManagerProperty('SANSState'),
+                             doc='A property manager which fulfills the SANSState contract.')
+
+        # Workspace which is to be moved
+        self.declareProperty(MatrixWorkspaceProperty("Workspace", '',
+                                                     optional=PropertyMode.Mandatory, direction=Direction.InOut),
+                             doc='The sample scatter workspace. This workspace does not contain monitors.')
+
+        # Move Type
+        move_types = StringListValidator(list(self._make_move_type_map().keys()))
+        self.declareProperty('MoveType', 'ElementaryDisplacement', validator=move_types, direction=Direction.Input,
+                             doc='The type of movement. This can be: '
+                                 '1) InitialMove for freshly workspaces, '
+                                 '2) ElementaryDisplacement of the instrument component, '
+                                 '3) SetToZero resets a component to the initial position.')
+
+        # Coordinates of the beam
+        self.declareProperty(FloatArrayProperty(name='BeamCoordinates', values=[]),
+                             doc='The coordinates which are used to position the instrument component(s). If nothing '
+                                 'is specified, then the coordinates from SANSState are used')
+
+        # Components which are to be moved
+        self.declareProperty('Component', '', direction=Direction.Input, doc='Component that should be moved.')
+
+    def PyExec(self):
+        # Read the state
+        state_property_manager = self.getProperty("SANSState").value
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+
+        # Get the correct SANS move strategy from the SANSMoveFactory
+        workspace = self.getProperty("Workspace").value
+        move_factory = SANSMoveFactory()
+        mover = move_factory.create_mover(workspace)
+
+        # Get the selected component and the beam coordinates
+        move_info = state.move
+        full_component_name = self._get_full_component_name(move_info)
+        coordinates = self._get_coordinates(move_info, full_component_name)
+
+        # Get which move operation the user wants to perform on the workspace. This can be:
+        # 1. Initial move: Suitable when a workspace has been freshly loaded.
+        # 2. Elementary displacement: Takes the degrees of freedom of the detector into account. This is normally used
+        #    for beam center finding
+        # 3. Set to zero: Set the component to its zero position
+        progress = Progress(self, start=0.0, end=1.0, nreports=2)
+        selected_move_type = self._get_move_type()
+        if selected_move_type is MoveType.ElementaryDisplacement:
+            progress.report("Starting elementary displacement")
+            mover.move_with_elementary_displacement(move_info, workspace, coordinates, full_component_name)
+        elif selected_move_type is MoveType.InitialMove:
+            progress.report("Starting initial move.")
+            mover.move_initial(move_info, workspace, coordinates, full_component_name)
+        elif selected_move_type is MoveType.SetToZero:
+            progress.report("Starting set to zero.")
+            mover.set_to_zero(move_info, workspace, full_component_name)
+        else:
+            raise ValueError("SANSMove: The selection {0} for the  move type "
+                             "is unknown".format(str(selected_move_type)))
+        progress.report("Completed move.")
+
+    def _get_full_component_name(self, move_info):
+        """
+        Select the detector name for the input component.
+
+        The component can be either:
+        1. An actual component name for LAB or HAB
+        2. Or the word HAB, LAB which will then select the actual component name, e.g. main-detector-bank
+        :param move_info: a SANSStateMove object
+        :return: the full name of the component or an empty string if it is not found.
+        """
+        component = self.getProperty("Component").value
+        selected_detector = get_detector_for_component(move_info, component)
+        return selected_detector.detector_name if selected_detector is not None else ""
+
+    def _get_move_type(self):
+        move_type_input = self.getProperty("MoveType").value
+        move_type_map = self._make_move_type_map()
+        return move_type_map[move_type_input]
+
+    def _get_coordinates(self, move_info, full_component_name):
+        """
+        Gets the coordinates for a particular component.
+
+        If the coordinates were not specified by the user then the coordinates are taken from the move state.
+        There are several possible scenarios
+        1. component is specified => take the beam centre from the appropriate detector
+        2. component is not specified => take the beam centre from the LAB
+        :param move_info: a SANSStateMove object
+        :param full_component_name: The full component name as it is known to the Mantid instrument
+        :return:
+        """
+        coordinates = self.getProperty("BeamCoordinates").value.tolist()
+        if not coordinates:
+            # Get the selected detector
+            detectors = move_info.detectors
+            selected_detector = get_detector_for_component(move_info, full_component_name)
+
+            # If the detector is unknown take the position from the LAB
+            if selected_detector is None:
+                selected_detector = detectors[DetectorType.to_string(DetectorType.LAB)]
+            pos1 = selected_detector.sample_centre_pos1
+            pos2 = selected_detector.sample_centre_pos2
+            coordinates = [pos1, pos2]
+        return coordinates
+
+    def validateInputs(self):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        state_property_manager = self.getProperty("SANSState").value
+        try:
+            state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+            state.property_manager = state_property_manager
+            state.validate()
+        except ValueError as err:
+            errors.update({"SANSState": str(err)})
+
+        # Check that if the MoveType is either InitialMove or ElementaryDisplacement, then there are beam coordinates
+        # supplied. In the case of SetToZero these coordinates are ignored if they are supplied
+        coordinates = self.getProperty("BeamCoordinates").value
+        selected_move_type = self._get_move_type()
+        if len(coordinates) == 0 and (selected_move_type is MoveType.ElementaryDisplacement):
+            errors.update({"BeamCoordinates": "Beam coordinates were not specified. An elementary displacement "
+                                              "requires beam coordinates."})
+        return errors
+
+
+# Register algorithm with Mantid
+AlgorithmFactory.subscribe(SANSMove)
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py
index 97eaab3bf75bde8083d7e4b93e8c3430b9735e08..14eef9a18081873906761dbcc2e0b700cbdc2708 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py
@@ -1,12 +1,11 @@
 from __future__ import (absolute_import, division, print_function)
 
 import unittest
+import mantid
 from mantid.simpleapi import DeleteWorkspace, CreateSampleWorkspace, CloneWorkspace, GroupWorkspaces
-from testhelpers import run_algorithm
-from mantid.api import AnalysisDataService, WorkspaceGroup
+from mantid.api import AnalysisDataService, WorkspaceGroup, mtd
 import numpy as np
-
-
+from testhelpers import run_algorithm
 
 
 class FindEPPTest(unittest.TestCase):
@@ -102,5 +101,15 @@ class FindEPPTest(unittest.TestCase):
         run_algorithm("DeleteWorkspace", Workspace=wsoutput)
         run_algorithm("DeleteWorkspace", Workspace=ws_narrow)
 
+    def testFitOutputWorkspacesAreDeleted(self):
+        OutputWorkspaceName = "outputws1"
+        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName)
+        wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
+        DeleteWorkspace(wsoutput)
+        oldOption = mantid.config['MantidOptions.InvisibleWorkspaces']
+        mantid.config['MantidOptions.InvisibleWorkspaces'] = '1'
+        self.assertEqual(mtd.size(), 1) # Only self._input_ws exists.
+        mantid.config['MantidOptions.InvisibleWorkspaces'] = oldOption
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/MatchPeaksTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/MatchPeaksTest.py
index 400a3ba880b6d551117f913499d0ec7ec403f885..88fd4e37527a3a613bec0635c6e5f69cc08316f5 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/MatchPeaksTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/MatchPeaksTest.py
@@ -7,6 +7,7 @@ from mantid.simpleapi import *
 from mantid.api import *
 from testhelpers import run_algorithm
 
+
 class MatchPeaksTest(unittest.TestCase):
 
     _args = {}
@@ -100,7 +101,7 @@ class MatchPeaksTest(unittest.TestCase):
         if AnalysisDataService.doesExist('to_be_shifted'):
             DeleteWorkspace(self._ws_shift)
         if AnalysisDataService.doesExist('in_2'):
-            DeleteWorkspace(self._ws_in_2 )
+            DeleteWorkspace(self._ws_in_2)
         if AnalysisDataService.doesExist('output'):
             DeleteWorkspace(mtd['output'])
         if AnalysisDataService.doesExist('wrong_number_of_histograms'):
@@ -108,60 +109,41 @@ class MatchPeaksTest(unittest.TestCase):
         if AnalysisDataService.doesExist('wrong_number_of_bins'):
             DeleteWorkspace(self._in2)
 
-    def testValidatorInput(self):
+    def testValidateInputWorkspace(self):
         self._args['OutputWorkspace'] = 'output'
-        # Test if incompatible workspaces will fail
-        if sys.version_info >= (2, 7):
-            with self.assertRaises(RuntimeError):
-                self._args['InputWorkspace'] = self._in1
-                run_algorithm('MatchPeaks', **self._args)
-
-                self._args['InputWorkspace'] = self._in2
-                run_algorithm('MatchPeaks', **self._args)
-        else:
-            incompatible = False
-            try:
-                self._args['InputWorkspace'] = self._in1
-                run_algorithm('MatchPeaks', **self._args)
-
-                self._args['InputWorkspace'] = self._in2
-                run_algorithm('MatchPeaks', **self._args)
-            except RuntimeError:
-                incompatible = True
-            self.assertTrue(incompatible, "Workspaces are incompatible")
-
-        # Test if compatible workspaces will be accepted (size, X-values, E-values)
+        self.assertTrue(sys.version_info >= (2, 7))
+        with self.assertRaises(RuntimeError) as contextManager:
+            self._args['InputWorkspace'] = self._in1
+            run1 = run_algorithm('MatchPeaks', **self._args)
+            self.assertTrue(run1.isExecuted())
+        self.assertEqual('Some invalid Properties found', str(contextManager.exception))
+        with self.assertRaises(RuntimeError) as contextManager:
+            self._args['InputWorkspace'] = self._in2
+            run2 = run_algorithm('MatchPeaks', **self._args)
+            self.assertTrue(run2.isExecuted())
+        self.assertEqual('Some invalid Properties found', str(contextManager.exception))
+
+    def testValidateInputWorkspace2(self):
         self._args['InputWorkspace'] = self._ws_shift
-        alg_test = run_algorithm('MatchPeaks', **self._args)
-        self.assertTrue(alg_test.isExecuted())
-
-    def testValidatorInput2(self):
+        self._args['OutputWorkspace'] = 'output'
+        self.assertTrue(sys.version_info >= (2, 7))
+        with self.assertRaises(RuntimeError) as contextManager:
+            self._args['InputWorkspace2'] = self._in1
+            run_algorithm('MatchPeaks', **self._args)
+        self.assertEqual('Some invalid Properties found', str(contextManager.exception))
+        with self.assertRaises(RuntimeError) as contextManager:
+            self._args['InputWorkspace2'] = self._in2
+            run_algorithm('MatchPeaks', **self._args)
+        self.assertEqual('Some invalid Properties found', str(contextManager.exception))
+
+    def testValidateInputWorkspace3(self):
         self._args['InputWorkspace'] = self._ws_shift
+        self._args['InputWorkspace3'] = self._ws_in_3
         self._args['OutputWorkspace'] = 'output'
-        # Test if incompatible workspaces will fail
-        if sys.version_info >= (2, 7):
-            with self.assertRaises(RuntimeError):
-                self._args['InputWorkspace2'] = self._in1
-                run_algorithm('MatchPeaks', **self._args)
-
-                self._args['InputWorkspace2'] = self._in2
-                run_algorithm('MatchPeaks', **self._args)
-        else:
-            incompatible = False
-            try:
-                self._args['InputWorkspace2'] = self._in1
-                run_algorithm('MatchPeaks', **self._args)
-
-                self._args['InputWorkspace2'] = self._in2
-                run_algorithm('MatchPeaks', **self._args)
-            except RuntimeError:
-                incompatible = True
-            self.assertTrue(incompatible, "Workspaces are incompatible")
-
-        # Test if compatible workspaces will be accepted (size, X-values, E-values)
-        self._args['InputWorkspace2'] = self._ws_in_2
-        alg_test = run_algorithm('MatchPeaks', **self._args)
-        self.assertTrue(alg_test.isExecuted())
+        self.assertTrue(sys.version_info >= (2, 7))
+        with self.assertRaises(RuntimeError) as contextManager:
+            run_algorithm('MatchPeaks', **self._args)
+        self.assertEqual('Some invalid Properties found', str(contextManager.exception))
 
     def testMatchCenter(self):
         # Input workspace should match its center
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/IndirectTab.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/IndirectTab.h
index 1420683abd9cef0a5d9d8391f985cefa635d45b4..9d820c2a7a62cd59a7a194fd970aca40ebf5df52 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/IndirectTab.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/IndirectTab.h
@@ -99,7 +99,9 @@ protected:
   QString getWorkspaceSuffix(const QString &wsName);
   /// Gets the base name of a workspace
   QString getWorkspaceBasename(const QString &wsName);
-
+  /// Plot multiple spectra from multiple workspaces
+  void plotMultipleSpectra(const QStringList &workspaceNames,
+                           const std::vector<int> &workspaceIndices);
   /// Plot a spectrum plot with a given ws index
   void plotSpectrum(const QStringList &workspaceNames, int wsIndex = 0);
   /// Plot a spectrum plot of a given workspace
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Quasi.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Quasi.h
index 741aaf97bb80ede486063e452ea655b8ac92aa77..31bc21ba040f9532226b9215a630514f7ac4cb39 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Quasi.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Quasi.h
@@ -42,6 +42,8 @@ private slots:
   void saveClicked();
   // Handles plotting
   void plotClicked();
+  // Handles plotting current preview
+  void plotCurrentPreview();
 
 private:
   /// Current preview spectrum
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Quasi.ui b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Quasi.ui
index 77bb3a92221337cd5e37acecd893f2632a8a22f4..6b188829c05499ea402b871a55da59b44371dd1f 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Quasi.ui
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Quasi.ui
@@ -31,9 +31,6 @@
          <verstretch>0</verstretch>
         </sizepolicy>
        </property>
-       <property name="loadLabelText" stdset="0">
-        <string>Plot</string>
-       </property>
        <property name="workspaceSuffixes" stdset="0">
         <stringlist>
          <string>_red</string>
@@ -46,12 +43,18 @@
          <string>_sqw.nxs</string>
         </stringlist>
        </property>
+       <property name="showLoad" stdset="0">
+        <bool>false</bool>
+       </property>
+       <property name="autoLoad" stdset="0">
+        <bool>true</bool>
+       </property>
       </widget>
      </item>
      <item row="0" column="0">
-      <widget class="QLabel" name="lblInput">
+      <widget class="QLabel" name="lblSample">
        <property name="text">
-        <string>Input:</string>
+        <string>Sample:</string>
        </property>
       </widget>
      </item>
@@ -310,6 +313,13 @@
            </property>
           </spacer>
          </item>
+         <item>
+          <widget class="QPushButton" name="pbPlotPreview">
+           <property name="text">
+            <string>Plot Current Preview</string>
+           </property>
+          </widget>
+         </item>
         </layout>
        </item>
       </layout>
@@ -404,11 +414,6 @@
   </layout>
  </widget>
  <customwidgets>
-  <customwidget>
-   <class>MantidQt::API::MWRunFiles</class>
-   <extends>QWidget</extends>
-   <header>MantidQtAPI/MWRunFiles.h</header>
-  </customwidget>
   <customwidget>
    <class>MantidQt::MantidWidgets::DataSelector</class>
    <extends>QWidget</extends>
@@ -420,6 +425,11 @@
    <header>MantidQtMantidWidgets/PreviewPlot.h</header>
    <container>1</container>
   </customwidget>
+  <customwidget>
+   <class>MantidQt::API::MWRunFiles</class>
+   <extends>QWidget</extends>
+   <header>MantidQtAPI/MWRunFiles.h</header>
+  </customwidget>
  </customwidgets>
  <resources/>
  <connections/>
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ResNorm.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ResNorm.h
index 68dfb23b5bd52e15d78c0ac1a4abbec7af629cb8..8cf1c3ccc0701883bf78336549c9a99ff61d79a6 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ResNorm.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ResNorm.h
@@ -37,6 +37,7 @@ private slots:
   /// Slots to handle plot and save
   void saveClicked();
   void plotClicked();
+  void plotCurrentPreview();
 
 private:
   /// Current preview spectrum
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ResNorm.ui b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ResNorm.ui
index 3daf281b516a5cbf330782ab8704015abe36cb07..6ab80828119005757d99103182733a5e5170a2f0 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ResNorm.ui
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ResNorm.ui
@@ -73,9 +73,6 @@
        <property name="autoLoad" stdset="0">
         <bool>true</bool>
        </property>
-       <property name="loadLabelText" stdset="0">
-        <string>Plot</string>
-       </property>
        <property name="workspaceSuffixes" stdset="0">
         <stringlist>
          <string>_red</string>
@@ -86,6 +83,9 @@
          <string>_red.nxs</string>
         </stringlist>
        </property>
+       <property name="showLoad" stdset="0">
+        <bool>false</bool>
+       </property>
       </widget>
      </item>
     </layout>
@@ -140,6 +140,13 @@
            </property>
           </spacer>
          </item>
+         <item>
+          <widget class="QPushButton" name="pbPlotCurrent">
+           <property name="text">
+            <string>Plot Current Preview</string>
+           </property>
+          </widget>
+         </item>
         </layout>
        </item>
       </layout>
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Stretch.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Stretch.h
index 6de34a2f4f33d077f149fcc4e7edcdb5dd76c076..934b8cdfa850b0689b226845e294fdc70145ccd9 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Stretch.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Stretch.h
@@ -34,8 +34,12 @@ private slots:
   /// Plot the workspaces specified by the interface
   void plotWorkspaces();
   void algorithmComplete(const bool &error);
+  void plotCurrentPreview();
+  void previewSpecChanged(int value);
 
 private:
+  /// Current preview spectrum
+  int m_previewSpec;
   // The ui form
   Ui::Stretch m_uiForm;
   // Output Names
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Stretch.ui b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Stretch.ui
index b0534ce9d6b7db9a7178e8bf06a94cb4ad886210..47ada47dfbd114f29d489e21a88b65e9b122bda3 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Stretch.ui
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/Stretch.ui
@@ -31,9 +31,6 @@
          <verstretch>0</verstretch>
         </sizepolicy>
        </property>
-       <property name="loadLabelText" stdset="0">
-        <string>Plot</string>
-       </property>
        <property name="workspaceSuffixes" stdset="0">
         <stringlist>
          <string>_red</string>
@@ -46,6 +43,12 @@
          <string>_sqw.nxs</string>
         </stringlist>
        </property>
+       <property name="autoLoad" stdset="0">
+        <bool>true</bool>
+       </property>
+       <property name="showLoad" stdset="0">
+        <bool>false</bool>
+       </property>
       </widget>
      </item>
      <item row="1" column="0">
@@ -165,11 +168,11 @@
     </widget>
    </item>
    <item>
-    <layout class="QHBoxLayout" name="horizontalLayout_3">
-     <item>
+    <layout class="QGridLayout" name="gridLayout_3">
+     <item row="0" column="0">
       <layout class="QVBoxLayout" name="treeSpace"/>
      </item>
-     <item>
+     <item row="0" column="2">
       <widget class="MantidQt::MantidWidgets::PreviewPlot" name="ppPlot" native="true">
        <property name="canvasColour" stdset="0">
         <color>
@@ -183,6 +186,40 @@
        </property>
       </widget>
      </item>
+     <item row="1" column="2">
+      <layout class="QHBoxLayout" name="horizontalLayout_2">
+       <item>
+        <widget class="QLabel" name="lbPreviewSpectrum">
+         <property name="text">
+          <string>Current Spectrum:</string>
+         </property>
+        </widget>
+       </item>
+       <item>
+        <widget class="QSpinBox" name="spPreviewSpectrum"/>
+       </item>
+       <item>
+        <spacer name="horizontalSpacer_2">
+         <property name="orientation">
+          <enum>Qt::Horizontal</enum>
+         </property>
+         <property name="sizeHint" stdset="0">
+          <size>
+           <width>40</width>
+           <height>20</height>
+          </size>
+         </property>
+        </spacer>
+       </item>
+       <item>
+        <widget class="QPushButton" name="pbPlotPreview">
+         <property name="text">
+          <string>Plot Current Preview</string>
+         </property>
+        </widget>
+       </item>
+      </layout>
+     </item>
     </layout>
    </item>
    <item>
diff --git a/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp b/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp
index 96a489bface68f9a6362220a0f26c41c7e4e8915..6137629fa50bfd24b3072c238fbf5493789322e0 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp
@@ -10,8 +10,8 @@
 #include "MantidQtAPI/InterfaceManager.h"
 #include "MantidQtMantidWidgets/RangeSelector.h"
 
-#include <boost/algorithm/string/find.hpp>
 #include <QMessageBox>
+#include <boost/algorithm/string/find.hpp>
 
 using namespace Mantid::API;
 using namespace Mantid::Geometry;
@@ -206,6 +206,40 @@ QString IndirectTab::getWorkspaceBasename(const QString &wsName) {
   return wsName.left(lastUnderscoreIndex);
 }
 
+/**
+ * Plots different spectra from multiple workspaces on the same plot
+ *
+ * This uses the plotSpectrum function from the Python API.
+ *
+ * @param workspaceNames List of names of workspaces to plot
+ * @param workspaceIndices List of indices to plot
+ */
+void IndirectTab::plotMultipleSpectra(
+    const QStringList &workspaceNames,
+    const std::vector<int> &workspaceIndices) {
+
+  if (workspaceNames.isEmpty())
+    return;
+  if (workspaceNames.length() != static_cast<int>(workspaceIndices.size()))
+    return;
+
+  QString pyInput = "from mantidplot import plotSpectrum\n";
+  pyInput += "current_window = plotSpectrum('";
+  pyInput += workspaceNames[0];
+  pyInput += "', ";
+  pyInput += QString::number(workspaceIndices[0]);
+  pyInput += ")\n";
+
+  for (int i = 1; i < workspaceNames.size(); i++) {
+    pyInput += "plotSpectrum('";
+    pyInput += workspaceNames[i];
+    pyInput += "', ";
+    pyInput += QString::number(workspaceIndices[i]);
+    pyInput += ", window=current_window)\n";
+  }
+  m_pythonRunner.runPythonCode(pyInput);
+}
+
 /**
  * Creates a spectrum plot of one or more workspaces at a given spectrum
  * index.
@@ -295,14 +329,14 @@ void IndirectTab::plotSpectrum(const QString &workspaceName, int specStart,
 }
 
 /**
-* Creates a spectrum plot of one or more workspaces with a set
-*  of spectra specified in a vector
-*
-* This uses the plotSpectrum function from the Python API.
-*
-* @param workspaceNames List of names of workspaces to plot
-* @param wsIndices List of indices of spectra to plot
-*/
+ * Creates a spectrum plot of one or more workspaces with a set
+ *  of spectra specified in a vector
+ *
+ * This uses the plotSpectrum function from the Python API.
+ *
+ * @param workspaceNames List of names of workspaces to plot
+ * @param wsIndices List of indices of spectra to plot
+ */
 void IndirectTab::plotSpectra(const QStringList &workspaceNames,
                               const std::vector<int> &wsIndices) {
   if (workspaceNames.isEmpty()) {
@@ -326,12 +360,12 @@ void IndirectTab::plotSpectra(const QStringList &workspaceNames,
 }
 
 /**
-* Creates a spectrum plot of a single workspace with a set
-*  of spectra specified in a vector
-*
-* @param workspaceName Name of workspace to plot
-* @param wsIndices List of indices of spectra to plot
-*/
+ * Creates a spectrum plot of a single workspace with a set
+ *  of spectra specified in a vector
+ *
+ * @param workspaceName Name of workspace to plot
+ * @param wsIndices List of indices of spectra to plot
+ */
 void IndirectTab::plotSpectra(const QString &workspaceName,
                               const std::vector<int> &wsIndices) {
   if (workspaceName.isEmpty()) {
diff --git a/MantidQt/CustomInterfaces/src/Indirect/Quasi.cpp b/MantidQt/CustomInterfaces/src/Indirect/Quasi.cpp
index 4f0535e42f526d43b2f91bde719c33deaaa78a88..6041eddb367f87ceb99c8287c7305ba2a7aa0190 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/Quasi.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/Quasi.cpp
@@ -53,7 +53,7 @@ Quasi::Quasi(QWidget *parent) : IndirectBayesTab(parent), m_previewSpec(0) {
   connect(m_uiForm.dsResolution, SIGNAL(dataReady(const QString &)), this,
           SLOT(handleResolutionInputReady(const QString &)));
 
-  // Connect the progrm selector to its handler
+  // Connect the program selector to its handler
   connect(m_uiForm.cbProgram, SIGNAL(currentIndexChanged(int)), this,
           SLOT(handleProgramChange(int)));
 
@@ -61,6 +61,10 @@ Quasi::Quasi(QWidget *parent) : IndirectBayesTab(parent), m_previewSpec(0) {
   connect(m_uiForm.spPreviewSpectrum, SIGNAL(valueChanged(int)), this,
           SLOT(previewSpecChanged(int)));
 
+  // Plot current preview
+  connect(m_uiForm.pbPlotPreview, SIGNAL(clicked()), this,
+          SLOT(plotCurrentPreview()));
+
   // Post saving
   connect(m_uiForm.pbSave, SIGNAL(clicked()), this, SLOT(saveClicked()));
 
@@ -231,7 +235,7 @@ void Quasi::run() {
   m_batchAlgoRunner->executeBatchAsync();
 }
 /**
- * Enable plotting and savimg and fit curves on the mini plot.
+ * Enable plotting and saving and fit curves on the mini plot.
  */
 void Quasi::algorithmComplete(bool error) {
   if (error)
@@ -333,9 +337,30 @@ void Quasi::handleSampleInputReady(const QString &filename) {
   eRangeSelector->setMaximum(range.second);
 }
 
+/**
+* Plots the current preview on the miniplot
+*/
+void Quasi::plotCurrentPreview() {
+
+  if (m_uiForm.ppPlot->hasCurve("fit.1")) {
+    QString program = m_uiForm.cbProgram->currentText();
+    auto fitName = m_QuasiAlg->getPropertyValue("OutputWorkspaceFit");
+    checkADSForPlotSaveWorkspace(fitName, false);
+    fitName.pop_back();
+    QString QfitWS = QString::fromStdString(fitName + "_");
+    QfitWS += QString::number(m_previewSpec);
+    if (program == "Lorentzians")
+      plotSpectra(QfitWS, {0, 1, 2, 4});
+    else
+      plotSpectra(QfitWS, {0, 1, 2});
+  } else if (m_uiForm.ppPlot->hasCurve("Sample")) {
+    plotSpectrum(m_uiForm.dsSample->getCurrentDataName(), m_previewSpec);
+  }
+}
+
 /**
  * Toggles the use ResNorm option depending on if the resolution file is a
- * resolution or vanadoum reduction.
+ * resolution or vanadium reduction.
  * @param wsName The name of the workspace loaded
  */
 void Quasi::handleResolutionInputReady(const QString &wsName) {
@@ -384,7 +409,7 @@ void Quasi::updateProperties(QtProperty *prop, double val) {
 }
 
 /**
- * Handles when the slected item in the program combobox
+ * Handles when the selected item in the program combobox
  * is changed
  *
  * @param index :: The current index of the combobox
diff --git a/MantidQt/CustomInterfaces/src/Indirect/ResNorm.cpp b/MantidQt/CustomInterfaces/src/Indirect/ResNorm.cpp
index d91c198cdb71cf99e2fb592c741a00b0f6d6b5f5..c77f7b9cc424703583a92339d11a3daedd13aa57 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/ResNorm.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/ResNorm.cpp
@@ -1,9 +1,9 @@
 #include "MantidQtCustomInterfaces/Indirect/ResNorm.h"
 
-#include "MantidQtCustomInterfaces/UserInputValidator.h"
+#include "MantidAPI/ITableWorkspace.h"
 #include "MantidAPI/WorkspaceFactory.h"
 #include "MantidAPI/WorkspaceGroup.h"
-#include "MantidAPI/ITableWorkspace.h"
+#include "MantidQtCustomInterfaces/UserInputValidator.h"
 
 using namespace Mantid::API;
 
@@ -47,6 +47,8 @@ ResNorm::ResNorm(QWidget *parent) : IndirectBayesTab(parent), m_previewSpec(0) {
   // Post Plot and Save
   connect(m_uiForm.pbSave, SIGNAL(clicked()), this, SLOT(saveClicked()));
   connect(m_uiForm.pbPlot, SIGNAL(clicked()), this, SLOT(plotClicked()));
+  connect(m_uiForm.pbPlotCurrent, SIGNAL(clicked()), this,
+          SLOT(plotCurrentPreview()));
 }
 
 void ResNorm::setup() {}
@@ -289,21 +291,50 @@ void ResNorm::previewSpecChanged(int value) {
               fitWsName);
 
       MatrixWorkspace_sptr fit = WorkspaceFactory::Instance().create(fitWs, 1);
-      fit->setSharedX(0, fit->sharedX(1));
-      fit->setSharedY(0, fit->sharedY(1));
-      fit->setSharedE(0, fit->sharedE(1));
+      fit->setSharedX(0, fitWs->sharedX(1));
+      fit->setSharedY(0, fitWs->sharedY(1));
+      fit->setSharedE(0, fitWs->sharedE(1));
 
       for (size_t i = 0; i < fit->blocksize(); i++)
         fit->mutableY(0)[i] /= scaleFactors->cell<double>(m_previewSpec);
 
       m_uiForm.ppPlot->addSpectrum("Fit", fit, 0, Qt::red);
+
+      AnalysisDataService::Instance().addOrReplace(
+          "__" + fitWsGroupName + "_scaled", fit);
     }
   }
 }
 
 /**
-* Handles saving when button is clicked
-*/
+ * Plot the current spectrum in the miniplot
+ */
+
+void ResNorm::plotCurrentPreview() {
+
+  QStringList plotWorkspaces;
+  std::vector<int> plotIndices;
+
+  if (m_uiForm.ppPlot->hasCurve("Vanadium")) {
+    plotWorkspaces << m_uiForm.dsVanadium->getCurrentDataName();
+    plotIndices.push_back(m_previewSpec);
+  }
+  if (m_uiForm.ppPlot->hasCurve("Resolution")) {
+    plotWorkspaces << m_uiForm.dsResolution->getCurrentDataName();
+    plotIndices.push_back(0);
+  }
+  if (m_uiForm.ppPlot->hasCurve("Fit")) {
+    std::string fitWsGroupName(m_pythonExportWsName + "_Fit_Workspaces");
+
+    plotWorkspaces << QString::fromStdString("__" + fitWsGroupName + "_scaled");
+    plotIndices.push_back(0);
+  }
+  plotMultipleSpectra(plotWorkspaces, plotIndices);
+}
+
+/**
+ * Handles saving when button is clicked
+ */
 
 void ResNorm::saveClicked() {
 
@@ -320,8 +351,8 @@ void ResNorm::saveClicked() {
 }
 
 /**
-* Handles plotting when button is clicked
-*/
+ * Handles plotting when button is clicked
+ */
 
 void ResNorm::plotClicked() {
   WorkspaceGroup_sptr fitWorkspaces =
diff --git a/MantidQt/CustomInterfaces/src/Indirect/Stretch.cpp b/MantidQt/CustomInterfaces/src/Indirect/Stretch.cpp
index 7d4cbb4fd97df20b962327ec918b668e9017762f..5040931328cd9aed149b85c636258d7c1896bc80 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/Stretch.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/Stretch.cpp
@@ -12,7 +12,8 @@ Mantid::Kernel::Logger g_log("Stretch");
 
 namespace MantidQt {
 namespace CustomInterfaces {
-Stretch::Stretch(QWidget *parent) : IndirectBayesTab(parent), m_save(false) {
+Stretch::Stretch(QWidget *parent)
+    : IndirectBayesTab(parent), m_previewSpec(0), m_save(false) {
   m_uiForm.setupUi(parent);
 
   // Create range selector
@@ -58,10 +59,15 @@ Stretch::Stretch(QWidget *parent) : IndirectBayesTab(parent), m_save(false) {
           SLOT(handleSampleInputReady(const QString &)));
   connect(m_uiForm.chkSequentialFit, SIGNAL(toggled(bool)), m_uiForm.cbPlot,
           SLOT(setEnabled(bool)));
+  // Connect preview spectrum spinner to handler
+  connect(m_uiForm.spPreviewSpectrum, SIGNAL(valueChanged(int)), this,
+          SLOT(previewSpecChanged(int)));
 
   // Connect the plot and save push buttons
   connect(m_uiForm.pbPlot, SIGNAL(clicked()), this, SLOT(plotWorkspaces()));
   connect(m_uiForm.pbSave, SIGNAL(clicked()), this, SLOT(saveWorkspaces()));
+  connect(m_uiForm.pbPlotPreview, SIGNAL(clicked()), this,
+          SLOT(plotCurrentPreview()));
 }
 
 void Stretch::setup() {}
@@ -177,7 +183,7 @@ void Stretch::algorithmComplete(const bool &error) {
 }
 
 /**
- * Handles the saving of workspaces post alogrithm completion
+ * Handles the saving of workspaces post algorithm completion
  * when save button is clicked
  */
 void Stretch::saveWorkspaces() {
@@ -264,6 +270,39 @@ void Stretch::handleSampleInputReady(const QString &filename) {
   // update the current positions of the range bars
   eRangeSelector->setMinimum(range.first);
   eRangeSelector->setMaximum(range.second);
+
+  // set the max spectrum
+  MatrixWorkspace_const_sptr sampleWs =
+      AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>(
+          filename.toStdString());
+  const int spectra = static_cast<int>(sampleWs->getNumberHistograms());
+  m_uiForm.spPreviewSpectrum->setMaximum(spectra);
+}
+
+/**
+* Sets a new preview spectrum for the mini plot.
+*
+* @param value workspace index
+*/
+void Stretch::previewSpecChanged(int value) {
+  m_previewSpec = value;
+
+  if (!m_uiForm.dsSample->isValid())
+    return;
+
+  m_uiForm.ppPlot->clear();
+
+  QString sampleName = m_uiForm.dsSample->getCurrentDataName();
+  m_uiForm.ppPlot->addSpectrum("Sample", sampleName, m_previewSpec);
+}
+
+/**
+* Plots the current miniplot preview
+*/
+void Stretch::plotCurrentPreview() {
+  if (m_uiForm.ppPlot->hasCurve("Sample")) {
+    plotSpectrum(m_uiForm.dsSample->getCurrentDataName(), m_previewSpec);
+  }
 }
 
 /**
diff --git a/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp b/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp
index 5919e139128c6b687c48244b8013bb2a8aa58317..da2fd8c13059da66e134fb3344a45fa0084ffde1 100644
--- a/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp
+++ b/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp
@@ -398,7 +398,7 @@ void MuonAnalysis::plotSelectedItem() {
 void MuonAnalysis::plotItem(ItemType itemType, int tableRow,
                             PlotType plotType) {
   m_updating = true;
-
+  m_dataSelector->clearChosenGroups();
   AnalysisDataServiceImpl &ads = AnalysisDataService::Instance();
 
   try {
@@ -3023,6 +3023,9 @@ void MuonAnalysis::multiFitCheckboxChanged(int state) {
                                                 ? Muon::MultiFitState::Enabled
                                                 : Muon::MultiFitState::Disabled;
   m_fitFunctionPresenter->setMultiFitState(multiFitState);
+  if (multiFitState == Muon::MultiFitState::Disabled) {
+    m_dataSelector->clearChosenGroups();
+  }
 }
 
 /**
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/DataProcessorUI/DataProcessorCommandAdapter.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/DataProcessorUI/DataProcessorCommandAdapter.h
index 8d9ff1d547d3710c7f33478bf12bdcae03cfa437..747e21803b0a38f8754082e10cacd6a11a57e947 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/DataProcessorUI/DataProcessorCommandAdapter.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/DataProcessorUI/DataProcessorCommandAdapter.h
@@ -84,21 +84,26 @@ public:
     if (!m_adaptee->hasChild()) {
       // Sub-menus cannot be added to a toolbar
 
-      QAction *action = getAction();
+      QAction *action = getAction(true);
       toolbar->addAction(action);
     }
   };
 
-  /** Returns the action */
-  QAction *getAction() {
+  /**
+  * Returns the action
+  *
+  * @param shortcut : Whether or not to add a shortcut
+  */
+  QAction *getAction(bool shortcut = false) {
     QAction *action =
         new QAction(QString::fromStdString(m_adaptee->name()), this);
     action->setIcon(QIcon(QString::fromStdString(m_adaptee->icon())));
     action->setSeparator(m_adaptee->isSeparator());
     action->setToolTip(QString::fromStdString(m_adaptee->tooltip()));
     action->setWhatsThis(QString::fromStdString(m_adaptee->whatsthis()));
-    action->setShortcut(
-        QKeySequence(QString::fromStdString(m_adaptee->shortcut())));
+    if (shortcut)
+      action->setShortcut(
+          QKeySequence(QString::fromStdString(m_adaptee->shortcut())));
     connect(action, SIGNAL(triggered()), this, SLOT(call()));
 
     return action;
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h
index 14e4c1fe14a862b0583814f6db1d33405af0176b..5db55ca48cb933050c980c02f4fceec8298ee45d 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h
@@ -62,6 +62,8 @@ public:
   QStringList getChosenGroups() const override;
   /// Set chosen group
   void setChosenGroup(const QString &group) override;
+  /// Clear list of selected groups
+  void clearChosenGroups() const;
   /// Get selected periods
   QStringList getPeriodSelections() const override;
   /// Set selected period
diff --git a/MantidQt/MantidWidgets/src/DataProcessorUI/DataProcessorTwoLevelTreeManager.cpp b/MantidQt/MantidWidgets/src/DataProcessorUI/DataProcessorTwoLevelTreeManager.cpp
index cc737010c29687cd72c5299101c1eba6c4a2182c..013646b5b892999df386a9f594baac81f5ab6eff 100644
--- a/MantidQt/MantidWidgets/src/DataProcessorUI/DataProcessorTwoLevelTreeManager.cpp
+++ b/MantidQt/MantidWidgets/src/DataProcessorUI/DataProcessorTwoLevelTreeManager.cpp
@@ -330,7 +330,8 @@ void DataProcessorTwoLevelTreeManager::pasteSelected(const std::string &text) {
 
       int groupId = boost::lexical_cast<int>(values.front());
       int rowId = numRowsInGroup(groupId);
-      insertRow(groupId, rowId);
+      if (!m_model->insertRow(rowId, m_model->index(groupId, 0)))
+        return;
       for (int col = 0; col < m_model->columnCount(); col++) {
         m_model->setData(m_model->index(rowId, col, m_model->index(groupId, 0)),
                          QString::fromStdString(values[col + 1]));
diff --git a/MantidQt/MantidWidgets/src/DataProcessorUI/QDataProcessorTwoLevelTreeModel.cpp b/MantidQt/MantidWidgets/src/DataProcessorUI/QDataProcessorTwoLevelTreeModel.cpp
index ee275964451d8f177207fcec15e443d04eb5a06d..aeb766d70b29a625e1b6aa961b60d4b3674fa2d5 100644
--- a/MantidQt/MantidWidgets/src/DataProcessorUI/QDataProcessorTwoLevelTreeModel.cpp
+++ b/MantidQt/MantidWidgets/src/DataProcessorUI/QDataProcessorTwoLevelTreeModel.cpp
@@ -371,11 +371,21 @@ bool QDataProcessorTwoLevelTreeModel::removeRows(int position, int count,
 * @return : The number of rows
 */
 int QDataProcessorTwoLevelTreeModel::rowCount(const QModelIndex &parent) const {
-  return !parent.isValid()
-             ? static_cast<int>(m_rowsOfGroup.size())
-             : !parent.parent().isValid()
-                   ? static_cast<int>(m_rowsOfGroup[parent.row()].size())
-                   : 0;
+
+  // We are counting the number of groups
+  if (!parent.isValid())
+    return static_cast<int>(m_rowsOfGroup.size());
+
+  // This shouldn't happen
+  if (parent.parent().isValid())
+    return 0;
+
+  // This group does not exist anymore
+  if (parent.row() >= static_cast<int>(m_rowsOfGroup.size()))
+    return 0;
+
+  // Group exists, return number of children
+  return static_cast<int>(m_rowsOfGroup[parent.row()].size());
 }
 
 /** Updates an index with given data
diff --git a/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp b/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp
index 996ade87fc27872894e674d2a4b05e30f29265c4..07fb31bb11291b3deae887f1d0bb1cebfdecadb8 100644
--- a/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp
+++ b/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp
@@ -380,7 +380,15 @@ QStringList MuonFitDataSelector::getChosenGroups() const {
   }
   return chosen;
 }
-
+/**
+* Clears the list of selected groups (unchecks boxes)
+*/
+void MuonFitDataSelector::clearChosenGroups() const {
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    iter.value()->setChecked(false);
+  }
+}
 /**
  * Set the chosen group ticked and all others off
  * Used when switching from Home tab to Data Analysis tab
diff --git a/MantidQt/MantidWidgets/test/DataProcessorUI/GenericDataProcessorPresenterTest.h b/MantidQt/MantidWidgets/test/DataProcessorUI/GenericDataProcessorPresenterTest.h
index f82f24023992aa2a586d1be503c51d388ea7a619..77c8de7fdb5abebe862092df433c0cd629ed0861 100644
--- a/MantidQt/MantidWidgets/test/DataProcessorUI/GenericDataProcessorPresenterTest.h
+++ b/MantidQt/MantidWidgets/test/DataProcessorUI/GenericDataProcessorPresenterTest.h
@@ -2750,6 +2750,30 @@ public:
     TS_ASSERT(Mock::VerifyAndClearExpectations(&mockMainPresenter));
   }
 
+  void testPasteToNonexistentGroup() {
+    NiceMock<MockDataProcessorView> mockDataProcessorView;
+    NiceMock<MockProgressableView> mockProgress;
+    NiceMock<MockMainPresenter> mockMainPresenter;
+    GenericDataProcessorPresenter presenter(
+        createReflectometryWhiteList(), createReflectometryPreprocessMap(),
+        createReflectometryProcessor(), createReflectometryPostprocessor());
+    presenter.acceptViews(&mockDataProcessorView, &mockProgress);
+    presenter.accept(&mockMainPresenter);
+
+    // Clipboard contains a row belonging to group 1, which does not exist in the table
+    EXPECT_CALL(mockDataProcessorView, getClipboard())
+        .Times(1)
+        .WillRepeatedly(Return("1\t123\t0.5\t456\t1.2\t3.4\t3.14\t5\tabc"));
+    EXPECT_CALL(mockDataProcessorView, getSelectedChildren())
+        .Times(1)
+        .WillOnce(Return(std::map<int, std::set<int>>()));
+    TS_ASSERT_THROWS_NOTHING(
+        presenter.notify(DataProcessorPresenter::PasteSelectedFlag));
+
+    TS_ASSERT(Mock::VerifyAndClearExpectations(&mockDataProcessorView));
+    TS_ASSERT(Mock::VerifyAndClearExpectations(&mockMainPresenter));
+  }
+
   void testImportTable() {
     NiceMock<MockDataProcessorView> mockDataProcessorView;
     NiceMock<MockProgressableView> mockProgress;
diff --git a/MantidQt/MantidWidgets/test/DataProcessorUI/QDataProcessorTwoLevelTreeModelTest.h b/MantidQt/MantidWidgets/test/DataProcessorUI/QDataProcessorTwoLevelTreeModelTest.h
index d6c901b88d7c63fcc2a8f6ae41f02bf17eb5b311..c7ec79f5932b1f19c93276c25afca0f76e2077bf 100644
--- a/MantidQt/MantidWidgets/test/DataProcessorUI/QDataProcessorTwoLevelTreeModelTest.h
+++ b/MantidQt/MantidWidgets/test/DataProcessorUI/QDataProcessorTwoLevelTreeModelTest.h
@@ -615,6 +615,13 @@ public:
     TS_ASSERT_EQUALS(ws_model->String(4, 1), "13469");
   }
 
+  void testCountRowsOfNonexistentGroup() {
+
+    QDataProcessorTwoLevelTreeModel model(oneRowTable(), m_whitelist);
+
+    TS_ASSERT_THROWS_NOTHING(model.rowCount(model.index(1, 0)));
+  }
+
 private:
   DataProcessorWhiteList m_whitelist;
 };
diff --git a/Testing/Data/SystemTest/LARMOR/V2_LARMOR00013065-add.nxs.md5 b/Testing/Data/SystemTest/LARMOR/V2_LARMOR00013065-add.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..13b2f4be272c39e7076e1f5e7c81df3a20c40249
--- /dev/null
+++ b/Testing/Data/SystemTest/LARMOR/V2_LARMOR00013065-add.nxs.md5
@@ -0,0 +1 @@
+2b3a7d9d99a746f00121164d48d6ef97
diff --git a/Testing/Data/SystemTest/SANS2D/AddedMultiPeriodTestFile-add.nxs.md5 b/Testing/Data/SystemTest/SANS2D/AddedMultiPeriodTestFile-add.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..05f441c4d433db5a2e4e1dfdac8cd6988b9397fb
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/AddedMultiPeriodTestFile-add.nxs.md5
@@ -0,0 +1 @@
+7dc856f1d9889f00f39cb80556a1275f
diff --git a/Testing/Data/UnitTest/LARMOR00013065-add.nxs.md5 b/Testing/Data/UnitTest/LARMOR00013065-add.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..3724fe80717f5576a57bb7c9f6aeb103cd4afb28
--- /dev/null
+++ b/Testing/Data/UnitTest/LARMOR00013065-add.nxs.md5
@@ -0,0 +1 @@
+0ad830e195879c54ff64583338d0e80a
diff --git a/Testing/Data/UnitTest/SANS2D00022024-add.nxs.md5 b/Testing/Data/UnitTest/SANS2D00022024-add.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..5cfb354e86139f89a5b17807b3f2dff5b087efe3
--- /dev/null
+++ b/Testing/Data/UnitTest/SANS2D00022024-add.nxs.md5
@@ -0,0 +1 @@
+32bd286841898170929b941a12255fda
diff --git a/Testing/SystemTests/tests/analysis/SANSLoadTest.py b/Testing/SystemTests/tests/analysis/SANSLoadTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fddbd1158171a2ba5d46ab0894fca1e0e29eb6e
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/SANSLoadTest.py
@@ -0,0 +1,507 @@
+# pylint: disable=too-many-public-methods, invalid-name, too-many-arguments
+
+import unittest
+import stresstesting
+
+from mantid.dataobjects import (Workspace2D, EventWorkspace)
+from mantid.api import (AnalysisDataService, AlgorithmManager)
+
+from sans.algorithm_detail.load_data import SANSLoadDataFactory
+from sans.common.log_tagger import has_tag
+from sans.common.constants import (CALIBRATION_WORKSPACE_TAG, SANS_FILE_TAG)
+
+# Not clear why the names in the module are not found by Pylint, but it seems to get confused. Hence this check
+# needs to be disabled here.
+# pylint: disable=no-name-in-module
+from sans.test_helper.test_director import TestDirector
+from sans.common.enums import SANSFacility
+from sans.state.data import get_data_builder
+
+
+def remove_all_workspaces_from_ads():
+    workspaces_on_the_ads = AnalysisDataService.getObjectNames()
+    for name in workspaces_on_the_ads:
+        AnalysisDataService.remove(name)
+
+
+def compare_workspaces(workspace1, workspace2):
+    try:
+        alg = AlgorithmManager.createUnmanaged("CompareWorkspaces")
+        alg.initialize()
+        alg.setChild(True)
+        alg.setRethrows(True)
+        alg.setProperty("Workspace1", workspace1)
+        alg.setProperty("Workspace2", workspace2)
+        alg.setProperty("Tolerance", 1e-6)
+        alg.setProperty("ToleranceRelErr", True)
+        alg.setProperty("CheckAllData", True)
+        alg.execute()
+    except RuntimeError:
+        raise RuntimeError("Comparison was wrong.")
+
+
+# -----------------------------------------------
+# Tests for the Load factory
+# -----------------------------------------------
+class SANSLoadFactoryTest(unittest.TestCase):
+    def test_that_valid_file_information_does_not_raise(self):
+        # Arrange
+        load_factory = SANSLoadDataFactory()
+
+        ws_name_sample = "SANS2D00022024"
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter(ws_name_sample)
+        data = data_builder.build()
+
+        # Get the sample state
+        test_director = TestDirector()
+        test_director.set_states(data_state=data)
+        state = test_director.construct()
+
+        # Act + Assert
+        try:
+            load_factory.create_loader(state)
+            did_not_raise = True
+        except NotImplementedError:
+            did_not_raise = True
+        self.assertTrue(did_not_raise)
+
+
+# -----------------------------------------------
+# Tests for the SANSLoad algorithm
+# -----------------------------------------------
+class SANSLoadTest(unittest.TestCase):
+    @staticmethod
+    def _get_simple_state(sample_scatter, sample_trans=None, sample_direct=None,
+                          can_scatter=None, can_trans=None, can_direct=None, calibration=None,
+                          sample_scatter_period=None, sample_trans_period=None, sample_direct_period=None,
+                          can_scatter_period=None, can_trans_period=None, can_direct_period=None):
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter(sample_scatter)
+
+        # Set the file names
+        if sample_trans is not None:
+            data_builder.set_sample_transmission(sample_trans)
+
+        if sample_direct is not None:
+            data_builder.set_sample_direct(sample_direct)
+
+        if can_scatter is not None:
+            data_builder.set_can_scatter(can_scatter)
+
+        if can_trans is not None:
+            data_builder.set_can_transmission(can_trans)
+
+        if can_direct is not None:
+            data_builder.set_can_direct(can_direct)
+
+        # Set the periods
+        if sample_scatter_period is not None:
+            data_builder.set_sample_scatter_period(sample_scatter_period)
+
+        if sample_trans_period is not None:
+            data_builder.set_sample_transmission_period(sample_trans_period)
+
+        if sample_direct_period is not None:
+            data_builder.set_sample_direct_period(sample_direct_period)
+
+        if can_scatter_period is not None:
+            data_builder.set_can_scatter_period(can_scatter_period)
+
+        if can_trans_period is not None:
+            data_builder.set_can_transmission_period(can_trans_period)
+
+        if can_direct_period is not None:
+            data_builder.set_can_direct_period(can_direct_period)
+
+        # Add the calibration
+        if calibration is not None:
+            data_builder.set_calibration(calibration)
+
+        data_info = data_builder.build()
+
+        # Get the sample state
+        test_director = TestDirector()
+        test_director.set_states(data_state=data_info)
+
+        return test_director.construct()
+
+    def _evaluate_workspace_type(self, load_alg, num_workspaces, workspace_name, workspace_type, index):
+        if num_workspaces == 1:
+            ws = load_alg.getProperty(workspace_name).value
+            self.assertTrue(isinstance(ws, workspace_type[index]))
+        elif num_workspaces > 1:
+            for ind in range(1, num_workspaces + 1):
+                output_name = workspace_name + "_" + str(ind)
+                ws = load_alg.getProperty(output_name).value
+                self.assertTrue(isinstance(ws, workspace_type[index]))
+        else:
+            ws = load_alg.getProperty(workspace_name).value
+            self.assertTrue(ws is None)
+
+    def _do_test_output(self, load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type):
+        #  Check the number of workspaces
+        tags_numbers = ["NumberOfSampleScatterWorkspaces", "NumberOfSampleTransmissionWorkspaces",
+                        "NumberOfSampleDirectWorkspaces", "NumberOfCanScatterWorkspaces",
+                        "NumberOfCanTransmissionWorkspaces", "NumberOfCanDirectWorkspaces"]
+        for num_workspaces, num_name in zip(expected_number_of_workspaces, tags_numbers):
+            number_of_workspaces = load_alg.getProperty(num_name).value
+            self.assertTrue(number_of_workspaces == num_workspaces)
+
+        # Check that workspaces were loaded
+        tags_workspaces = ["SampleScatterWorkspace", "SampleTransmissionWorkspace",
+                           "SampleDirectWorkspace", "CanScatterWorkspace",
+                           "CanTransmissionWorkspace", "CanDirectWorkspace"]
+        index = 0
+        for num_workspaces, workspace_name in zip(expected_number_of_workspaces, tags_workspaces):
+            self._evaluate_workspace_type(load_alg, num_workspaces, workspace_name, workspace_type, index)
+            index += 1
+
+        # Check for the monitor workspaces
+        num_monitor_workspaces = [expected_number_of_workspaces[0], expected_number_of_workspaces[3]]
+        tags_monitors = ["SampleScatterMonitorWorkspace", "CanScatterMonitorWorkspace"]
+        workspace_type_monitor = [Workspace2D, Workspace2D]
+        index = 0
+        for num_workspaces, workspace_name in zip(num_monitor_workspaces, tags_monitors):
+            self._evaluate_workspace_type(load_alg, num_workspaces, workspace_name, workspace_type_monitor, index)
+            index += 1
+
+        # Confirm there is nothing on the ADS
+        workspaces_on_the_ads = AnalysisDataService.getObjectNames()
+        self.assertTrue(len(workspaces_on_the_ads) == expected_number_on_ads)
+
+    @staticmethod
+    def _has_calibration_been_applied(load_alg):
+        sample_workspace = load_alg.getProperty("SampleScatterWorkspace").value
+        if sample_workspace is None:
+            sample_workspace = load_alg.getProperty("SampleScatterWorkspace_1").value
+        has_calibration_tag = has_tag(CALIBRATION_WORKSPACE_TAG, sample_workspace)
+        has_file_tag = has_tag(SANS_FILE_TAG, sample_workspace)
+        return has_calibration_tag and has_file_tag
+
+    @staticmethod
+    def _run_load(state, publish_to_cache, use_cached, move_workspace=False, beam_coordinates=None,
+                  component=None, output_workspace_names=None):
+        load_alg = AlgorithmManager.createUnmanaged("SANSLoad")
+        load_alg.setChild(True)
+        load_alg.setRethrows(True)
+        load_alg.initialize()
+
+        state_dict = state.property_manager
+        load_alg.setProperty("SANSState", state_dict)
+        load_alg.setProperty("PublishToCache", publish_to_cache)
+        load_alg.setProperty("UseCached", use_cached)
+        load_alg.setProperty("MoveWorkspace", move_workspace)
+        if move_workspace:
+            load_alg.setProperty("Component", component)
+            load_alg.setProperty("BeamCoordinates", beam_coordinates)
+
+        if output_workspace_names:
+            for name, value in list(output_workspace_names.items()):
+                load_alg.setProperty(name, value)
+
+        # Act
+        load_alg.execute()
+        # self.assertTrue(load_alg.isExecuted())
+        return load_alg
+
+    def test_that_raises_when_transmission_is_event(self):
+        # Arrange
+        state = SANSLoadTest._get_simple_state(sample_scatter="SANS2D00028827",
+                                               sample_trans="SANS2D00028827",
+                                               sample_direct="SANS2D00028827",
+                                               calibration="TUBE_SANS2D_BOTH_27345_20Mar15.nxs")
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter",
+                                  "SampleTransmissionWorkspace": "sample_transmission",
+                                  "SampleDirectWorkspace": "sample_direct"}
+
+        kwargs = {"state": state, "publish_to_cache": False, "use_cached": False, "move_workspace": False,
+                  "output_workspace_names": output_workspace_names}
+
+        self.assertRaises(RuntimeError, self._run_load, **kwargs)
+
+    def test_that_runs_for_isis_nexus_file_with_event_data_and_single_period(self):
+        # Arrange
+        state = SANSLoadTest._get_simple_state(sample_scatter="SANS2D00028827",
+                                               sample_trans="SANS2D00028784",
+                                               sample_direct="SANS2D00028804",
+                                               calibration="TUBE_SANS2D_BOTH_27345_20Mar15.nxs")
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter",
+                                  "SampleTransmissionWorkspace": "sample_transmission",
+                                  "SampleDirectWorkspace": "sample_direct"}
+
+        load_alg = self._run_load(state, publish_to_cache=False, use_cached=False, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [1, 1, 1, 0, 0, 0]
+        expected_number_on_ads = 0
+        workspace_type = [EventWorkspace, Workspace2D, Workspace2D, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration is added
+        self.assertTrue(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+    def test_that_runs_for_isis_nexus_file_with_histogram_data_and_single_period(self):
+        # Arrange
+        state = SANSLoadTest._get_simple_state(sample_scatter="SANS2D00000808",
+                                               sample_trans="SANS2D00028784",
+                                               sample_direct="SANS2D00028804")
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter",
+                                  "SampleTransmissionWorkspace": "sample_transmission",
+                                  "SampleDirectWorkspace": "sample_direct"}
+        load_alg = self._run_load(state, publish_to_cache=False, use_cached=False, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [1, 1, 1, 0, 0, 0]
+        expected_number_on_ads = 0
+        workspace_type = [Workspace2D, Workspace2D, Workspace2D, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration is added
+        self.assertFalse(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+    def test_that_runs_for_raw_file_with_histogram_data_and_single_period(self):
+        # Arrange
+        state = SANSLoadTest._get_simple_state(sample_scatter="SANS2D00000808.raw",
+                                               sample_trans="SANS2D00028784",
+                                               sample_direct="SANS2D00028804")
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter",
+                                  "SampleTransmissionWorkspace": "sample_transmission",
+                                  "SampleDirectWorkspace": "sample_direct"}
+        load_alg = self._run_load(state, publish_to_cache=False, use_cached=False, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [1, 1, 1, 0, 0, 0]
+        expected_number_on_ads = 0
+        workspace_type = [Workspace2D, Workspace2D, Workspace2D, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration is added
+        self.assertFalse(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+    def test_that_runs_for_isis_nexus_file_with_histogram_data_and_multi_period(self):
+        # Arrange
+        state = SANSLoadTest._get_simple_state(sample_scatter="SANS2D00005512.nxs")
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
+        load_alg = self._run_load(state, publish_to_cache=False, use_cached=False, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [13, 0, 0, 0, 0, 0]
+        expected_number_on_ads = 0
+        workspace_type = [Workspace2D, None, None, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration is added
+        self.assertFalse(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+    def test_that_runs_for_isis_nexus_file_with_histogram_data_and_multi_period_and_select_single_period(self):
+        # Arrange
+        special_selection_on_group = 3
+        state = SANSLoadTest._get_simple_state(sample_scatter="SANS2D00005512.nxs",
+                                               sample_scatter_period=special_selection_on_group)
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
+        load_alg = self._run_load(state, publish_to_cache=False, use_cached=False, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [1, 0, 0, 0, 0, 0]
+        expected_number_on_ads = 0
+        workspace_type = [Workspace2D, None, None, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration is added
+        self.assertFalse(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+    def test_that_can_load_isis_nexus_file_with_event_data_and_multi_period(self):
+        # Arrange
+        state = SANSLoadTest._get_simple_state(sample_scatter="LARMOR00013065.nxs",
+                                               calibration="80tubeCalibration_18-04-2016_r9330-9335.nxs")
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
+        load_alg = self._run_load(state, publish_to_cache=True, use_cached=True, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [4, 0, 0, 0, 0, 0]
+        expected_number_on_ads = 1
+        workspace_type = [EventWorkspace, None, None, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration is added
+        self.assertTrue(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+        # Confirm that the ADS workspace contains the calibration file
+        try:
+            AnalysisDataService.retrieve("80tubeCalibration_18-04-2016_r9330-9335")
+            on_ads = True
+        except RuntimeError:
+            on_ads = False
+        self.assertTrue(on_ads)
+
+        # Cleanup
+        remove_all_workspaces_from_ads()
+
+    def test_that_runs_for_isis_nexus_file_with_event_data_and_multi_period_and_select_single_period(self):
+        # Arrange
+        special_selection_on_group = 3
+        state = SANSLoadTest._get_simple_state(sample_scatter="LARMOR00013065.nxs",
+                                               sample_scatter_period=special_selection_on_group)
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
+        load_alg = self._run_load(state, publish_to_cache=True, use_cached=True, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [1, 0, 0, 0, 0, 0]
+        expected_number_on_ads = 0
+        workspace_type = [EventWorkspace, None, None, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration has not been added
+        self.assertFalse(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+        # Cleanup
+        remove_all_workspaces_from_ads()
+
+    def test_that_can_load_single_period_from_added_multi_period_histogram_file(self):
+        # Arrange
+        special_selection_on_group = 7
+        state = SANSLoadTest._get_simple_state(sample_scatter="AddedMultiPeriodTestFile-add.nxs",
+                                               sample_scatter_period=special_selection_on_group)
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
+        load_alg = self._run_load(state, publish_to_cache=True, use_cached=True, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [1, 0, 0, 0, 0, 0]
+        expected_number_on_ads = 0
+        workspace_type = [Workspace2D, None, None, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration is added
+        self.assertFalse(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+        # Cleanup
+        remove_all_workspaces_from_ads()
+
+    def test_that_can_load_all_periods_from_added_multi_period_histogram_file(self):
+        # Arrange
+        state = SANSLoadTest._get_simple_state(sample_scatter="AddedMultiPeriodTestFile-add.nxs")
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
+        load_alg = self._run_load(state, publish_to_cache=False, use_cached=False, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [13, 0, 0, 0, 0, 0]
+        expected_number_on_ads = 0
+        workspace_type = [Workspace2D, None, None, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration is added
+        self.assertFalse(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+        # Cleanup
+        remove_all_workspaces_from_ads()
+
+    def test_that_can_load_single_period_from_added_multi_period_event_file(self):
+        # Arrange
+        special_selection_on_group = 2
+        state = SANSLoadTest._get_simple_state(sample_scatter="V2_LARMOR00013065-add.nxs",
+                                               sample_scatter_period=special_selection_on_group)
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
+        load_alg = self._run_load(state, publish_to_cache=True, use_cached=True, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [1, 0, 0, 0, 0, 0]
+        expected_number_on_ads = 0
+        workspace_type = [EventWorkspace, None, None, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration is added
+        self.assertFalse(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+        # Cleanup
+        remove_all_workspaces_from_ads()
+
+    def test_that_can_load_all_periods_from_added_multi_period_event_file(self):
+        # Arrange
+        state = SANSLoadTest._get_simple_state(sample_scatter="V2_LARMOR00013065-add.nxs")
+
+        # Act
+        output_workspace_names = {"SampleScatterWorkspace": "sample_scatter",
+                                  "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
+        load_alg = self._run_load(state, publish_to_cache=True, use_cached=True, move_workspace=False,
+                                  output_workspace_names=output_workspace_names)
+
+        # Assert
+        expected_number_of_workspaces = [4, 0, 0, 0, 0, 0]
+        expected_number_on_ads = 0
+        workspace_type = [EventWorkspace, None, None, None, None, None]
+        self._do_test_output(load_alg, expected_number_of_workspaces, expected_number_on_ads, workspace_type)
+
+        # Check that calibration is added
+        self.assertFalse(SANSLoadTest._has_calibration_been_applied(load_alg))
+
+        # Cleanup
+        remove_all_workspaces_from_ads()
+
+
+class SANSLoadDataRunnerTest(stresstesting.MantidStressTest):
+    def __init__(self):
+        stresstesting.MantidStressTest.__init__(self)
+        self._success = False
+
+    def runTest(self):
+        suite = unittest.TestSuite()
+        suite.addTest(unittest.makeSuite(SANSLoadFactoryTest, 'test'))
+        suite.addTest(unittest.makeSuite(SANSLoadTest, 'test'))
+        runner = unittest.TextTestRunner()
+        res = runner.run(suite)
+        if res.wasSuccessful():
+            self._success = True
+
+    def requiredMemoryMB(self):
+        return 2000
+
+    def validate(self):
+        return self._success
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Testing/SystemTests/tests/analysis/SANSMoveTest.py b/Testing/SystemTests/tests/analysis/SANSMoveTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..94ad59dd265ecf44bc09bc97f7731ed1f3f7aca8
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/SANSMoveTest.py
@@ -0,0 +1,424 @@
+# pylint: disable=too-many-public-methods, invalid-name, too-many-arguments
+
+import unittest
+import stresstesting
+
+
+from mantid.api import AlgorithmManager
+from mantid.kernel import (Quat, V3D)
+from sans.algorithm_detail.move_workspaces import (SANSMoveFactory, SANSMoveLOQ, SANSMoveSANS2D, SANSMoveLARMORNewStyle,
+                                                   SANSMoveLARMOROldStyle)
+from sans.common.enums import (SANSFacility, DetectorType)
+# Not clear why the names in the module are not found by Pylint, but it seems to get confused. Hence this check
+# needs to be disabled here.
+# pylint: disable=no-name-in-module
+from sans.test_helper.test_director import TestDirector
+from sans.state.move import get_move_builder
+from sans.state.data import get_data_builder
+
+
+def load_workspace(file_name):
+    alg = AlgorithmManager.createUnmanaged("Load")
+    alg.initialize()
+    alg.setChild(True)
+    alg.setProperty("Filename", file_name)
+    alg.setProperty("OutputWorkspace", "dummy")
+    alg.execute()
+    return alg.getProperty("OutputWorkspace").value
+
+
+class SANSMoveFactoryTest(unittest.TestCase):
+    def _do_test(self, file_name, mover_type):
+        # Arrange
+        workspace = load_workspace(file_name)
+        move_factory = SANSMoveFactory()
+        # Act
+        mover = move_factory.create_mover(workspace)
+        # Assert
+        self.assertTrue(isinstance(mover, mover_type))
+
+    def test_that_LOQ_strategy_is_selected(self):
+        file_name = "LOQ74044"
+        mover_type = SANSMoveLOQ
+        self._do_test(file_name, mover_type)
+
+    def test_that_SANS2D_strategy_is_selected(self):
+        file_name = "SANS2D00028784"
+        mover_type = SANSMoveSANS2D
+        self._do_test(file_name, mover_type)
+
+    def test_that_LARMOR_new_style_strategy_is_selected(self):
+        file_name = "LARMOR00002260"
+        mover_type = SANSMoveLARMORNewStyle
+        self._do_test(file_name, mover_type)
+
+    def test_that_LARMOR_8Tubes_strategy_is_selected(self):
+        file_name = "LARMOR00000063"
+        mover_type = SANSMoveLARMOROldStyle
+        self._do_test(file_name, mover_type)
+
+
+class SANSMoveTest(unittest.TestCase):
+    @staticmethod
+    def _get_simple_state(sample_scatter, lab_x_translation_correction=None, lab_z_translation_correction=None):
+        # Set the data
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter(sample_scatter)
+        data_info = data_builder.build()
+
+        # Set the move parameters
+        builder = get_move_builder(data_info)
+        if lab_x_translation_correction is not None:
+            builder.set_LAB_x_translation_correction(lab_x_translation_correction)
+        if lab_z_translation_correction is not None:
+            builder.set_LAB_z_translation_correction(lab_z_translation_correction)
+        move_info = builder.build()
+
+        # Get the sample state
+        test_director = TestDirector()
+        test_director.set_states(data_state=data_info, move_state=move_info)
+        return test_director.construct()
+
+    @staticmethod
+    def _get_position_and_rotation(workspace, move_info, component):
+        instrument = workspace.getInstrument()
+        component_name = move_info.detectors[component].detector_name
+        detector = instrument.getComponentByName(component_name)
+        position = detector.getPos()
+        rotation = detector.getRotation()
+        return position, rotation
+
+    @staticmethod
+    def _provide_mover(workspace):
+        move_factory = SANSMoveFactory()
+        return move_factory.create_mover(workspace)
+
+    def compare_expected_position(self, expected_position, expected_rotation, component, move_info, workspace):
+        position, rotation = SANSMoveTest._get_position_and_rotation(workspace, move_info, component)
+        for index in range(0, 3):
+            self.assertAlmostEqual(position[index], expected_position[index], delta=1e-4)
+        for index in range(0, 4):
+            self.assertAlmostEqual(rotation[index], expected_rotation[index], delta=1e-4)
+
+    def check_that_elementary_displacement_with_only_translation_is_correct(self, workspace, move_alg, move_info,
+                                                                            coordinates, component, component_key):
+        # Get position and rotation before the move
+        position_before_move, rotation_before_move = SANSMoveTest._get_position_and_rotation(workspace, move_info,
+                                                                                             component_key)
+        expected_position_elementary_move = V3D(position_before_move[0] - coordinates[0],
+                                                position_before_move[1] - coordinates[1],
+                                                position_before_move[2])
+        expected_rotation = rotation_before_move
+        move_alg.setProperty("BeamCoordinates", coordinates)
+        move_alg.setProperty("MoveType", "ElementaryDisplacement")
+        move_alg.setProperty("Component", component)
+        move_alg.execute()
+        self.assertTrue(move_alg.isExecuted())
+
+        self.compare_expected_position(expected_position_elementary_move, expected_rotation,
+                                       component_key, move_info, workspace)
+
+    def check_that_sets_to_zero(self, workspace, move_alg, move_info, comp_name=None):
+        # Reset the position to zero
+        move_alg.setProperty("Workspace", workspace)
+        move_alg.setProperty("MoveType", "SetToZero")
+        if comp_name is not None:
+            move_alg.setProperty("Component", comp_name)
+        else:
+            move_alg.setProperty("Component", "")
+        move_alg.execute()
+        self.assertTrue(move_alg.isExecuted())
+
+        # Get the components to compare
+        if comp_name is None:
+            hab_name = move_info.detectors[DetectorType.to_string(DetectorType.HAB)].detector_name
+            lab_name = move_info.detectors[DetectorType.to_string(DetectorType.LAB)].detector_name
+            component_names = list(move_info.monitor_names.values())
+            component_names.append(hab_name)
+            component_names.append(lab_name)
+            component_names.append("some-sample-holder")
+        else:
+            component_names = [comp_name]
+
+        # Ensure that the positions on the base instrument and the instrument are the same
+        instrument = workspace.getInstrument()
+        base_instrument = instrument.getBaseInstrument()
+        for component_name in component_names:
+            # Confirm that the positions are the same
+            component = instrument.getComponentByName(component_name)
+            base_component = base_instrument.getComponentByName(component_name)
+
+            # If we are dealing with a monitor which has not been implemented we need to continue
+            if component is None or base_component is None:
+                continue
+
+            position = component.getPos()
+            position_base = base_component.getPos()
+            for index in range(0, 3):
+                self.assertAlmostEqual(position[index], position_base[index], delta=1e-4)
+            rotation = component.getRotation()
+            rotation_base = base_component.getRotation()
+            for index in range(0, 4):
+                self.assertAlmostEqual(rotation[index], rotation_base[index], delta=1e-4)
+
+    def _run_move(self, state, workspace, move_type, beam_coordinates=None, component=None):
+        move_alg = AlgorithmManager.createUnmanaged("SANSMove")
+        move_alg.setChild(True)
+        move_alg.initialize()
+        state_dict = state.property_manager
+
+        move_alg.setProperty("SANSState", state_dict)
+        move_alg.setProperty("Workspace", workspace)
+        move_alg.setProperty("MoveType", move_type)
+
+        if beam_coordinates is not None:
+            move_alg.setProperty("BeamCoordinates", beam_coordinates)
+
+        if component is not None:
+            move_alg.setProperty("Component", component)
+
+        # Act
+        move_alg.execute()
+        self.assertTrue(move_alg.isExecuted())
+        return move_alg
+
+    def test_that_LOQ_can_perform_move(self):
+        # Arrange
+        # Setup data info
+        file_name = "LOQ74044"
+        lab_x_translation_correction = 123.
+        beam_coordinates = [45, 25]
+        component = "main-detector-bank"
+        component_key = DetectorType.to_string(DetectorType.LAB)
+
+        workspace = load_workspace(file_name)
+        state = SANSMoveTest._get_simple_state(sample_scatter=file_name,
+                                               lab_x_translation_correction=lab_x_translation_correction)
+
+        # Act
+        move_alg = self._run_move(state, workspace=workspace, move_type="InitialMove",
+                                  beam_coordinates=beam_coordinates, component=component)
+
+        # Act + Assert for initial move
+        move_info = state.move
+        center_position = move_info.center_position
+        initial_z_position = 15.15
+        expected_position = V3D(center_position - beam_coordinates[0] + lab_x_translation_correction,
+                                center_position - beam_coordinates[1],
+                                initial_z_position)
+        expected_rotation = Quat(1., 0., 0., 0.)
+        self.compare_expected_position(expected_position, expected_rotation, component_key, move_info, workspace)
+
+        # # Act + Assert for elementary move on high-angle bank
+        component_elementary_move = "HAB"
+        component_elementary_move_key = DetectorType.to_string(DetectorType.HAB)
+
+        beam_coordinates_elementary_move = [120, 135]
+        self.check_that_elementary_displacement_with_only_translation_is_correct(workspace, move_alg, move_info,
+                                                                                 beam_coordinates_elementary_move,
+                                                                                 component_elementary_move,
+                                                                                 component_elementary_move_key)
+
+        # Act + Assert for setting to zero position for all
+        self.check_that_sets_to_zero(workspace, move_alg, state.move, comp_name="main-detector-bank")
+
+    def test_that_SANS2D_can_move(self):
+        # Arrange
+        file_name = "SANS2D00028784"
+        lab_z_translation_correction = 123.
+
+        workspace = load_workspace(file_name)
+        state = SANSMoveTest._get_simple_state(sample_scatter=file_name,
+                                               lab_z_translation_correction=lab_z_translation_correction)
+        beam_coordinates = [0.1076, -0.0835]
+
+        # Act
+        # The component input is not relevant for SANS2D's initial move. All detectors are moved
+        component = None
+        move_alg = self._run_move(state, workspace=workspace, move_type="InitialMove",
+                                  beam_coordinates=beam_coordinates, component=component)
+
+        # Assert for initial move for low angle bank
+        # These values are on the workspace and in the sample logs,
+        component_to_investigate = DetectorType.to_string(DetectorType.LAB)
+        initial_z_position = 23.281
+        rear_det_z = 11.9989755859
+        offset = 4.
+        total_x = 0.
+        total_y = 0.
+        total_z = initial_z_position + rear_det_z - offset + lab_z_translation_correction
+        expected_position = V3D(total_x - beam_coordinates[0], total_y - beam_coordinates[1], total_z)
+        expected_rotation = Quat(1., 0., 0., 0.)
+        self.compare_expected_position(expected_position, expected_rotation,
+                                       component_to_investigate, state.move, workspace)
+
+        # Assert for initial move for high angle bank
+        # These values are on the workspace and in the sample logs
+        component_to_investigate = DetectorType.to_string(DetectorType.HAB)
+        initial_x_position = 1.1
+        x_correction = -0.187987540973
+        initial_z_position = 23.281
+        z_correction = 1.00575649188
+        total_x = initial_x_position + x_correction
+        total_y = 0.
+        total_z = initial_z_position + z_correction
+        expected_position = V3D(total_x - beam_coordinates[0], total_y - beam_coordinates[1], total_z)
+        expected_rotation = Quat(0.9968998362876025, 0., 0.07868110579898738, 0.)
+        self.compare_expected_position(expected_position, expected_rotation,
+                                       component_to_investigate, state.move, workspace)
+
+        # Act + Assert for elementary move
+        component_elementary_move = "rear-detector"
+        component_elementary_move_key = DetectorType.to_string(DetectorType.LAB)
+        beam_coordinates_elementary_move = [120, 135]
+        self.check_that_elementary_displacement_with_only_translation_is_correct(workspace, move_alg, state.move,
+                                                                                 beam_coordinates_elementary_move,
+                                                                                 component_elementary_move,
+                                                                                 component_elementary_move_key)
+
+        # # Act + Assert for setting to zero position for all
+        self.check_that_sets_to_zero(workspace, move_alg, state.move, comp_name=None)
+
+    def test_that_LARMOR_new_style_can_move(self):
+        # Arrange
+        file_name = "LARMOR00002260"
+        lab_x_translation_correction = 123.
+
+        workspace = load_workspace(file_name)
+        state = SANSMoveTest._get_simple_state(sample_scatter=file_name,
+                                               lab_x_translation_correction=lab_x_translation_correction)
+
+        # Note that the first entry is an angle while the second is a translation (in meter)
+        beam_coordinates = [24., 38.]
+
+        # Act for initial move
+        component = None
+        move_alg = self._run_move(state, workspace=workspace, move_type="InitialMove",
+                                  beam_coordinates=beam_coordinates, component=component)
+
+        # Assert low angle bank for initial move
+        # These values are on the workspace and in the sample logs
+        component_to_investigate = DetectorType.to_string(DetectorType.LAB)
+        # The rotation couples the movements, hence we just insert absolute value, to have a type of regression test.
+        expected_position = V3D(0, -38, 25.3)
+        expected_rotation = Quat(0.978146, 0, 0.20792, 0)
+        self.compare_expected_position(expected_position, expected_rotation,
+                                       component_to_investigate, state.move, workspace)
+
+        # Act + Assert for setting to zero position for all
+        self.check_that_sets_to_zero(workspace, move_alg, state.move, comp_name=None)
+
+    def test_that_LARMOR_old_Style_can_be_moved(self):
+        # Arrange
+        file_name = "LARMOR00000063"
+        workspace = load_workspace(file_name)
+        state = SANSMoveTest._get_simple_state(sample_scatter=file_name)
+
+        # Note that both entries are translations
+        beam_coordinates = [24., 38.]
+
+        # Act
+        component = None
+        move_alg = self._run_move(state, workspace=workspace, move_type="InitialMove",
+                                  beam_coordinates=beam_coordinates, component=component)
+
+        # Assert low angle bank for initial move
+        # These values are on the workspace and in the sample logs
+        component_to_investigate = DetectorType.to_string(DetectorType.LAB)
+        # The rotation couples the movements, hence we just insert absolute value, to have a type of regression test
+        # solely.
+        expected_position = V3D(-beam_coordinates[0], -beam_coordinates[1], 25.3)
+        expected_rotation = Quat(1., 0., 0., 0.)
+        self.compare_expected_position(expected_position, expected_rotation,
+                                       component_to_investigate, state.move, workspace)
+
+        # Act + Assert for setting to zero position for all
+        self.check_that_sets_to_zero(workspace, move_alg, state.move, comp_name=None)
+
+    def test_that_missing_beam_centre_is_taken_from_move_state(self):
+        # Arrange
+        file_name = "SANS2D00028784"
+        lab_z_translation_correction = 123.
+
+        workspace = load_workspace(file_name)
+        state = SANSMoveTest._get_simple_state(sample_scatter=file_name,
+                                               lab_z_translation_correction=lab_z_translation_correction)
+        # These values should be used instead of an explicitly specified beam centre
+        state.move.detectors[DetectorType.to_string(DetectorType.HAB)].sample_centre_pos1 = 26.
+        state.move.detectors[DetectorType.to_string(DetectorType.HAB)].sample_centre_pos2 = 98.
+
+        # Act
+        # The component input is not relevant for SANS2D's initial move. All detectors are moved
+        component = "front-detector"
+        self._run_move(state, workspace=workspace, move_type="InitialMove", component=component)
+
+        # Assert for initial move for high angle bank
+        # These values are on the workspace and in the sample logs
+        component_to_investigate = DetectorType.to_string(DetectorType.HAB)
+        initial_x_position = 1.1
+        x_correction = -0.187987540973
+        initial_z_position = 23.281
+        z_correction = 1.00575649188
+        total_x = initial_x_position + x_correction
+        total_y = 0.
+        total_z = initial_z_position + z_correction
+        expected_position = V3D(total_x - 26., total_y - 98., total_z)
+        expected_rotation = Quat(0.9968998362876025, 0., 0.07868110579898738, 0.)
+        self.compare_expected_position(expected_position, expected_rotation,
+                                       component_to_investigate, state.move, workspace)
+
+    def test_that_missing_beam_centre_is_taken_from_lab_move_state_when_no_component_is_specified(self):
+        # Arrange
+        file_name = "SANS2D00028784"
+        lab_z_translation_correction = 123.
+
+        workspace = load_workspace(file_name)
+        state = SANSMoveTest._get_simple_state(sample_scatter=file_name,
+                                               lab_z_translation_correction=lab_z_translation_correction)
+        # These values should be used instead of an explicitly specified beam centre
+        state.move.detectors[DetectorType.to_string(DetectorType.LAB)].sample_centre_pos1 = 26.
+        state.move.detectors[DetectorType.to_string(DetectorType.LAB)].sample_centre_pos2 = 98.
+
+        # Act
+        # The component input is not relevant for SANS2D's initial move. All detectors are moved
+        component = None
+        self._run_move(state, workspace=workspace, move_type="InitialMove", component=component)
+
+        # Assert for initial move for low angle bank
+        # These values are on the workspace and in the sample logs,
+        component_to_investigate = DetectorType.to_string(DetectorType.LAB)
+        initial_z_position = 23.281
+        rear_det_z = 11.9989755859
+        offset = 4.
+        total_x = 0.
+        total_y = 0.
+        total_z = initial_z_position + rear_det_z - offset + lab_z_translation_correction
+        expected_position = V3D(total_x - 26., total_y - 98., total_z)
+        expected_rotation = Quat(1., 0., 0., 0.)
+        self.compare_expected_position(expected_position, expected_rotation,
+                                       component_to_investigate, state.move, workspace)
+
+
+class SANSMoveRunnerTest(stresstesting.MantidStressTest):
+    def __init__(self):
+        stresstesting.MantidStressTest.__init__(self)
+        self._success = False
+
+    def runTest(self):
+        suite = unittest.TestSuite()
+        suite.addTest(unittest.makeSuite(SANSMoveFactoryTest, 'test'))
+        suite.addTest(unittest.makeSuite(SANSMoveTest, 'test'))
+        runner = unittest.TextTestRunner()
+        res = runner.run(suite)
+        if res.wasSuccessful():
+            self._success = True
+
+    def requiredMemoryMB(self):
+        return 1000
+
+    def validate(self):
+        return self._success
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/buildconfig/CMake/GNUSetup.cmake b/buildconfig/CMake/GNUSetup.cmake
index 30df52b6071760de3076ac07b3eed5248d739ec6..b25b8c6c6f43f719ff49de6ab06168bbf74fa87b 100644
--- a/buildconfig/CMake/GNUSetup.cmake
+++ b/buildconfig/CMake/GNUSetup.cmake
@@ -6,6 +6,15 @@
 # project settings should be included in the relevant CMakeLists.txt file
 # for that project.
 
+option ( USE_CCACHE "Use ccache to cache object artifacts if available" ON )
+if ( USE_CCACHE )
+  find_program(CCACHE_FOUND ccache)
+  if(CCACHE_FOUND)
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
+    set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
+  endif()
+endif()
+
 # Set our own compiler version flag from the cmake one and export it globally
 if ( CMAKE_COMPILER_IS_GNUCXX )
   set( GCC_COMPILER_VERSION ${CMAKE_CXX_COMPILER_VERSION} CACHE INTERNAL "")
diff --git a/docs/source/algorithms/FilterEvents-v1.rst b/docs/source/algorithms/FilterEvents-v1.rst
index 40a591c7faadcc61e7fd3300382db3e7a858f55c..5131167ebb55c939a5baa6cf6390ee4b97a17e91 100644
--- a/docs/source/algorithms/FilterEvents-v1.rst
+++ b/docs/source/algorithms/FilterEvents-v1.rst
@@ -94,6 +94,25 @@ Comparing with other event filtering algorithms
 Wiki page :ref:`EventFiltering` has a detailed introduction on event
 filtering in MantidPlot.
 
+
+Developer's Note
+----------------
+
+Splitters given by TableWorkspace
+#################################
+
+ - The *start/stop* time is converted to **m_vecSplitterTime**.
+ - The *splitting target* (in string) is mapped to a set of continuous integers that are stored in **m_vecSplitterGroup**.
+   - The mapping will be recorded in **m_targetIndexMap** and **m_wsGroupIndexTargetMap**.
+   - Class variable **m_maxTargetIndex** is set up to record the highest target group/index, i.e., the max value of m_vecSplitterGroup
+
+
+Undefined splitting target
+##########################
+
+Indexed as **0** in **m_vecSplitterGroup**.
+
+
 Usage
 -----
 
@@ -131,6 +150,53 @@ Output:
     workspace tempsplitws_5 has 5133 events
     workspace tempsplitws_unfiltered has 50603 events
 
+**Example - Filtering event by a user-generated TableWorkspace**
+
+.. testcode:: FilterEventNoCorrection
+
+    ws = Load(Filename='CNCS_7860_event.nxs')
+
+    # create TableWorkspace
+    split_table_ws = CreateEmptyTableWorkspace()
+    split_table_ws.addColumn('float', 'start')
+    split_table_ws.addColumn('float', 'stop')
+    split_table_ws.addColumn('str', 'target')
+
+    split_table_ws.addRow([0., 100., 'a'])
+    split_table_ws.addRow([200., 300., 'b'])
+    split_table_ws.addRow([400., 600., 'c'])
+    split_table_ws.addRow([600., 650., 'b'])
+
+    # filter events
+    FilterEvents(InputWorkspace=ws, SplitterWorkspace=split_table_ws,
+            OutputWorkspaceBaseName='tempsplitws3',  GroupWorkspaces=True,
+            FilterByPulseTime = False, OutputWorkspaceIndexedFrom1 = False,
+            CorrectionToSample = "None", SpectrumWithoutDetector = "Skip", SplitSampleLogs = False,
+            OutputTOFCorrectionWorkspace='mock')
+
+    # Print result
+    wsgroup = mtd["tempsplitws3"]
+    wsnames = wsgroup.getNames()
+    for name in sorted(wsnames):
+        tmpws = mtd[name]
+        print "workspace %s has %d events" % (name, tmpws.getNumberEvents())
+        split_log = tmpws.run().getProperty('splitter')
+        print 'event splitter log: entry 0 and entry 1 are {0} and {1}.'.format(split_log.times[0], split_log.times[1])
+
+
+Output:
+
+.. testoutput:: FilterEventNoCorrection
+
+    workspace tempsplitws3_a has 77580 events
+    event splitter log: entry 0 and entry 1 are 2010-03-25T16:08:37  and 2010-03-25T16:10:17 .
+    workspace tempsplitws3_b has 0 events
+    event splitter log: entry 0 and entry 1 are 2010-03-25T16:08:37  and 2010-03-25T16:11:57 .
+    workspace tempsplitws3_c has 0 events
+    event splitter log: entry 0 and entry 1 are 2010-03-25T16:08:37  and 2010-03-25T16:15:17 .
+    workspace tempsplitws3_unfiltered has 34686 events
+    event splitter log: entry 0 and entry 1 are 2010-03-25T16:08:37  and 2010-03-25T16:10:17 .
+
 
 **Example - Filtering event by pulse time**
 
diff --git a/docs/source/algorithms/SANSLoad-v1.rst b/docs/source/algorithms/SANSLoad-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d6d69a56119a5a0552620c07c587fbb5029096ab
--- /dev/null
+++ b/docs/source/algorithms/SANSLoad-v1.rst
@@ -0,0 +1,80 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm loads SANS data sets. The loading can handle nexus and raw files which can be plain or multi-period data. The SANS data sets which can be loaded with this algorithm are:
+
+* sample scatter data which is the actual data under investigation. The algorithm loads the corresponding monitor workspace.
+* sample transmission data
+* sample direct data
+* can scatter data. The algorithm also loads the corresponding monitor workspace.
+* can transmission data
+* can direct data
+
+In addition a calibration file which is applied after the data has been loaded can be specified. This calibration workspace can be used when the *PublishToADS* option is enabled.
+
+
+Relevant SANSState entries for SANSLoad
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The required information for the loading operation is retrieved from a SANSState input. It contains information
+about the data files which are involved during the calibration process. It also contains information about the
+calibration file which is applied to the scatter workspaces.
+
+The elements of the SANSState are:
+
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| Entry                     | Type   | Description                              | Mandatory                                    |
++===========================+========+==========================================+==============================================+
+| sample_scatter            | String | The name of the sample scatter file      | Yes                                          |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| sample_scatter_period     | Int    | The selected period or (0 for all)       | No                                           |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| sample_transmission       | String | The name of the sample transmission file | No, only if sample_direct was specified      |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| sample_transmission_period| Int    | The selected period or (0 for all)       | No                                           |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| sample_direct             | String | The name of the sample direct file       | No, only if sample_transmission was specified|
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| sample_direct_period      | Int    | The selected period or (0 for all)       | No                                           |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| can_scatter               | String | The name of the can scatter file         | No, only if can_transmission was specified   |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| can_scatter_period        | Int    | The selected period or (0 for all)       | No                                           |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| can_transmission          | String | The name of the can transmission file    | No, only if can_direct was specified         |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| can_transmission_period   | Int    | The selected period or (0 for all)       | No                                           |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| can_direct                | String | The name of the can direct file          | No, only if can_transmission was specified   |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| can_direct_period         | Int    | The selected period or (0 for all)       | No                                           |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+| calibration               | String | The name of the calibration file         | No                                           |
++---------------------------+--------+------------------------------------------+----------------------------------------------+
+
+Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.
+
+Optimization Setting: *PublishToCache* and *UseCached*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The *PublishToCache* setting will store the calibration workspace on the *AnalysisDataService* when it has been loaded for the first time. The loaded workspaces themselves will not be published.
+
+The *UseCached* setting will look for appropriate workspaces on the *AnalysisDataService* and use these workspaces instead of reloading them.
+
+Move a workspace
+~~~~~~~~~~~~~~~~
+
+It is possible to perform an initial, instrument-specific move of the selected component. Currently this move mechanism is implemented for **SANS2D**, **LOQ** and **LARMOR**. Other instruments will not be moved.
+If *MoveWorkspace* is selected, then a component and a beam position need to be selected.
+
+.. categories::
+
+.. sourcelink::
diff --git a/docs/source/algorithms/SANSMove-v1.rst b/docs/source/algorithms/SANSMove-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dcd6c9473d1b2c4947a7419b6d5ee8763167ef3a
--- /dev/null
+++ b/docs/source/algorithms/SANSMove-v1.rst
@@ -0,0 +1,138 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm moves a SANS workspace according to the settings in the state object. Additionally the user can specify
+the beam centre. Note that if the beam centre is also specified in the state object, then the manual selection takes
+precedence. The way we perform a move is highly instrument-dependent and in fact data-dependent. Currently the move mechanism
+is implemented for **SANS2D**, **LOQ** and **LARMOR**.
+
+
+Relevant SANSState entries for SANSMove
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The required information for the move operation is retrieved from a state object. It contains information which is
+specific to each instrument and to the specific IDF.
+
+
+Common elements of the move state object are:
++---------------------------+--------+------------------------------------------+--------------|---------------+
+| Entry                     | Type   | Description                              | Mandatory    | Default value
++===========================+========+==========================================+==============+================================+
+| sample_offset | Float | The offset of the sample in m | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| sample_offset_direction | CanonicalCoordinates enum | The direction of the sample offset | No | CanonicalCoordinates.Z|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| detectors | Dict | Dictionary of detectors. | auto setup | auto setup|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+
+
+The detectors dictionary above maps to a state object for the individual detectors:
++---------------------------+--------+------------------------------------------+--------------|---------------+
+| Entry                     | Type   | Description                              | Mandatory    | Default value
++===========================+========+==========================================+==============+================================+
+| x_translation_correction | Float | X translation for the detector in m | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| y_translation_correction | Float | Y translation for the detector in m | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| z_translation_correction | Float | Z translation for the detector in m | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| rotation_correction | Float | Rotation correction for the detector in degrees | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| side_correction | Float | Side correction for the detector in m | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| radius_correction | Float | Radius correction for the detector in m | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| x_tilt_correction | Float | X tilt for the detector in degrees | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| y_tilt_correction | Float | Y tilt for the detector in degrees | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| z_tilt_correction | Float | Z tilt for the detector in degrees | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| sample_centre_pos1 | Float | Position 1 of the beam centre in m or degree, depending on the setting| No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| sample_centre_pos2 | Float | Position 2 of the beam centre in m | No | 0.0 |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| detector_name | String | Detector name | auto setup | auto setup |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| detector_name_short | String | Short detector name | No | auto setup |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+
+The individual instruments have additional settings.
+
+For LOQ:
++---------------------------+--------+------------------------------------------+--------------|---------------+
+| Entry                     | Type   | Description                              | Mandatory    | Default value
++===========================+========+==========================================+==============+================================+
+| monitor_names | Dict | A dictionary with monitor index vs monitor name | auto setup | auto setup |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| center_position | Float | The centre position | No | 317.5 / 1000.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+
+For SANS2D:
++---------------------------+--------+------------------------------------------+--------------|---------------+
+| Entry                     | Type   | Description                              | Mandatory    | Default value
++===========================+========+==========================================+==============+================================+
+| monitor_names | Dict | A dictionary with monitor index vs monitor name | auto setup | auto setup |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| hab_detector_radius | Float | Radius for the front detector in m | auto setup (through IPF) | 306.0 / 1000.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| hab_detector_default_sd_m | Float | Default sd for front detector in m | auto setup (through IPF) | 317.5 / 1000.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| hab_detector_default_x_m | Float | Default x for the front detector in m | auto setup (through IPF) | 1.1|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| lab_detector_default_sd_m | Float | Default sd for the rear detector in m | auto setup | 4.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| hab_detector_x | Float | X for the front detector in m | auto setup | 0.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| hab_detector_z | Float | Z for the front detector in m  | auto setup  | 317.5 / 1000.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| hab_detector_rotation | Float | Rotation for the front detector | auto setup | 0.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| lab_detector_x | Float | X for the rear detector in m | auto setup | 0.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| lab_detector_z | Float | Z for the rear detector in m  | auto setup | 0.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| monitor_4_offset | Float | Offset for monitor 4 | No | 0.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+
+
+For LARMOR:
++---------------------------+--------+------------------------------------------+--------------|---------------+
+| Entry                     | Type   | Description                              | Mandatory    | Default value
++===========================+========+==========================================+==============+================================+
+| monitor_names | Dict | A dictionary with monitor index vs monitor name | auto setup | auto setup |
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+| bench_rotation | Float | The angle for the bench rotation | No | 0.|
++---------------------------+--------+------------------------------------------+--------------|-------------------------------+
+
+
+Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.
+
+Move options: *InitialMove*, *ElementaryDisplacement*, *SetToZero*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The *InitialMove* setting is relevant when loading data before a reduction. It will apply all corrections which are specified in the state object.
+
+The *ElementaryDisplacement* will perform a relative translation/rotation according to the specified beam centre value.
+
+The *SetToZero* places the component into the default position.
+
+
+Beam Centre
+~~~~~~~~~~~~~~~~
+
+The beam centre for a reduction is normally specified in the state object, but it can also be specified manually here.
+If the beam centre is specified explicitly, then it is used instead of the setting in the state object.
+
+
+.. categories::
+
+.. sourcelink::
diff --git a/docs/source/release/v3.10.0/framework.rst b/docs/source/release/v3.10.0/framework.rst
index 0a91986fe3ce7e093975c643163014f70af81ffa..99c619347896290433c81190dee7ffd32bc69365 100644
--- a/docs/source/release/v3.10.0/framework.rst
+++ b/docs/source/release/v3.10.0/framework.rst
@@ -23,9 +23,13 @@ New
 Improved
 ########
 
-- :ref`RawFileInfo <algm-RawFileInfo-v1>` now provides sample information.
-- :ref`SetInstrumentParameter <algm-SetInstrumentParameter-v1>` now supports also boolean parameters, and better validates the inputs.
-- Two new properties were added to :ref:`algm-Integration`: *RangeLowerList* and *RangeUpperList* can be used to give histogram-specific integration ranges.
+- :ref:`RawFileInfo <algm-RawFileInfo-v1>` now provides sample information.
+- :ref:`SetInstrumentParameter <algm-SetInstrumentParameter-v1>` now supports also boolean parameters, and better validates the inputs.
+- :ref:`FilterEvents <algm-FilterEvents-v1>` now accepts a general TableWorkspace as the splitters workspace.  The TableWorkspace must have at least 3 columns.  The first 3 columns are for relative starting time, relative stopping time and target workspace tag for splitters, respectively.
+- :ref:`FilterEvents <algm-FilterEvents-v1>` now generates a sample log named *splitter* for each output workspace (i.e., split workspace) to represent the event filter that is applied to it.
+- :ref:`FilterEvents <algm-FilterEvents-v1>` now splits all the sample logs if the input splitters are given by MatrixWorkspace or a general TableWorkspace.
+- Two new properties were added to :ref:`algm-Integration`: *RangeLowerList* and *RangeUpperList* can be used to give histogram-specific integration ranges.
+- :ref:`algm-FindEPP` does not output the two extra workspaces from the :ref:`algm-Fit` anymore.
 
 Bug Fixes
 #########
diff --git a/docs/source/release/v3.10.0/indirect_inelastic.rst b/docs/source/release/v3.10.0/indirect_inelastic.rst
index 33577664826969636183d30c6874eaa96791024c..6daec8eadce8145b1d4858c4ccdd3e8dff0a1146 100644
--- a/docs/source/release/v3.10.0/indirect_inelastic.rst
+++ b/docs/source/release/v3.10.0/indirect_inelastic.rst
@@ -42,8 +42,11 @@ Jump Fit
 
 Improvements
 ------------
+
+- Bayes interfaces have the functionality to plot the current preview in the miniplot
 - OSIRIS diffraction now rebins container workspaces to match the sample workspace
 
+
 Bugfixes
 --------
 
diff --git a/docs/source/release/v3.10.0/reflectometry.rst b/docs/source/release/v3.10.0/reflectometry.rst
index ca6c83ad6446ddb1db3e42e18c20d6e8f70c4354..2942dc90e32f12dbf602705ec57d479a8466f557 100644
--- a/docs/source/release/v3.10.0/reflectometry.rst
+++ b/docs/source/release/v3.10.0/reflectometry.rst
@@ -26,6 +26,11 @@ ISIS Reflectometry
 - Interface `ISIS Reflectometry (Polref)` has been renamed to `ISIS Reflectometry`.
 - Fixed a bug where the contents of the processing table where not saved to the selected table workspace.
 - Fixed a bug when removing rows from the processing table.
+- Fixed shortcuts:
+
+  - Ctrl+C copies the selected row(s) to the clipboard.
+  - Ctrl+V pastes the contents of the clipboard into the selected row(s). If no rows are selected, new ones are added at the end.
+  - Ctrl+X copies the selected row(s) to the clipboard and deletes them.
 
 ISIS Reflectometry (Old)
 ########################
diff --git a/instrument/CNCS_Definition.xml b/instrument/CNCS_Definition.xml
index bf4e89de244d76a0d0922de4869320c1f6565364..aeb474c728c6a63be4808fd93a2245e12714c1ae 100644
--- a/instrument/CNCS_Definition.xml
+++ b/instrument/CNCS_Definition.xml
@@ -1,5 +1,5 @@
 <?xml version='1.0' encoding='ASCII'?>
-<instrument xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.mantidproject.org/IDF/1.0" last-modified="2016-07-19 17:43:18.271852" name="CNCS" valid-from="2016-07-14 00:00:00" valid-to="2100-01-31 23:59:59" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 http://schema.mantidproject.org/IDF/1.0/IDFSchema.xsd">
+<instrument xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.mantidproject.org/IDF/1.0" last-modified="2017-03-13 14:25:33.964649" name="CNCS" valid-from="2017-03-14 10:00:00" valid-to="2100-01-31 23:59:59" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 http://schema.mantidproject.org/IDF/1.0/IDFSchema.xsd">
   <!--Created by Andrei Savici-->
   <defaults>
     <length unit="metre"/>
@@ -187,350 +187,350 @@
   </type>
   <type name="bank1">
     <component type="eightpack">
-      <location x="2.64164" y="0.0" z="-2.29918">
+      <location x="2.6416" y="0.0" z="-2.29914">
         <rot axis-x="0" axis-y="1" axis-z="0" val="311.035"/>
       </location>
     </component>
   </type>
   <type name="bank2">
     <component type="eightpack">
-      <location x="2.77996" y="0.0" z="-2.1289">
+      <location x="2.78091" y="0.0" z="-2.12963">
         <rot axis-x="0" axis-y="1" axis-z="0" val="307.445"/>
       </location>
     </component>
   </type>
   <type name="bank3">
     <component type="eightpack">
-      <location x="2.90368" y="0.0" z="-1.94788">
+      <location x="2.90499" y="0.0" z="-1.94876">
         <rot axis-x="0" axis-y="1" axis-z="0" val="303.855"/>
       </location>
     </component>
   </type>
   <type name="bank4">
     <component type="eightpack">
-      <location x="3.02334" y="0.0" z="-1.76422">
+      <location x="3.02394" y="0.0" z="-1.76457">
         <rot axis-x="0" axis-y="1" axis-z="0" val="300.265"/>
       </location>
     </component>
   </type>
   <type name="bank5">
     <component type="eightpack">
-      <location x="3.12671" y="0.0" z="-1.57086">
+      <location x="3.12748" y="0.0" z="-1.57125">
         <rot axis-x="0" axis-y="1" axis-z="0" val="296.675"/>
       </location>
     </component>
   </type>
   <type name="bank6">
     <component type="eightpack">
-      <location x="3.22076" y="0.0" z="-1.37277">
+      <location x="3.22181" y="0.0" z="-1.37322">
         <rot axis-x="0" axis-y="1" axis-z="0" val="293.085"/>
       </location>
     </component>
   </type>
   <type name="bank7">
     <component type="eightpack">
-      <location x="3.2988" y="0.0" z="-1.16784">
+      <location x="3.30034" y="0.0" z="-1.16839">
         <rot axis-x="0" axis-y="1" axis-z="0" val="289.495"/>
       </location>
     </component>
   </type>
   <type name="bank8">
     <component type="eightpack">
-      <location x="3.36467" y="0.0" z="-0.95877">
+      <location x="3.36571" y="0.0" z="-0.959066">
         <rot axis-x="0" axis-y="1" axis-z="0" val="285.905"/>
       </location>
     </component>
   </type>
   <type name="bank9">
     <component type="eightpack">
-      <location x="3.41762" y="0.0" z="-0.746098">
+      <location x="3.41936" y="0.0" z="-0.746478">
         <rot axis-x="0" axis-y="1" axis-z="0" val="282.315"/>
       </location>
     </component>
   </type>
   <type name="bank10">
     <component type="eightpack">
-      <location x="3.45829" y="0.0" z="-0.530737">
+      <location x="3.46011" y="0.0" z="-0.531016">
         <rot axis-x="0" axis-y="1" axis-z="0" val="278.725"/>
       </location>
     </component>
   </type>
   <type name="bank11">
     <component type="eightpack">
-      <location x="3.47828" y="0.0" z="-0.31257">
+      <location x="3.47985" y="0.0" z="-0.312712">
         <rot axis-x="0" axis-y="1" axis-z="0" val="275.135"/>
       </location>
     </component>
   </type>
   <type name="bank12">
     <component type="eightpack">
-      <location x="3.49632" y="0.0" z="-0.0943022">
+      <location x="3.49821" y="0.0" z="-0.0943532">
         <rot axis-x="0" axis-y="1" axis-z="0" val="271.545"/>
       </location>
     </component>
   </type>
   <type name="bank13">
     <component type="eightpack">
-      <location x="3.49657" y="0.0" z="0.124852">
+      <location x="3.49866" y="0.0" z="0.124927">
         <rot axis-x="0" axis-y="1" axis-z="0" val="267.955"/>
       </location>
     </component>
   </type>
   <type name="bank14">
     <component type="eightpack">
-      <location x="3.48291" y="0.0" z="0.343651">
+      <location x="3.48465" y="0.0" z="0.343822">
         <rot axis-x="0" axis-y="1" axis-z="0" val="264.365"/>
       </location>
     </component>
   </type>
   <type name="bank15">
     <component type="eightpack">
-      <location x="3.45275" y="0.0" z="0.560769">
+      <location x="3.45477" y="0.0" z="0.561097">
         <rot axis-x="0" axis-y="1" axis-z="0" val="260.775"/>
       </location>
     </component>
   </type>
   <type name="bank16">
     <component type="eightpack">
-      <location x="3.41025" y="0.0" z="0.775728">
+      <location x="3.41226" y="0.0" z="0.776185">
         <rot axis-x="0" axis-y="1" axis-z="0" val="257.185"/>
       </location>
     </component>
   </type>
   <type name="bank17">
     <component type="eightpack">
-      <location x="3.35409" y="0.0" z="0.987481">
+      <location x="3.35668" y="0.0" z="0.988243">
         <rot axis-x="0" axis-y="1" axis-z="0" val="253.595"/>
       </location>
     </component>
   </type>
   <type name="bank18">
     <component type="eightpack">
-      <location x="3.287" y="0.0" z="1.19604">
+      <location x="3.28912" y="0.0" z="1.19682">
         <rot axis-x="0" axis-y="1" axis-z="0" val="250.005"/>
       </location>
     </component>
   </type>
   <type name="bank19">
     <component type="eightpack">
-      <location x="3.20565" y="0.0" z="1.39952">
+      <location x="3.20853" y="0.0" z="1.40077">
         <rot axis-x="0" axis-y="1" axis-z="0" val="246.415"/>
       </location>
     </component>
   </type>
   <type name="bank20">
     <component type="eightpack">
-      <location x="3.11037" y="0.0" z="1.5968">
+      <location x="3.11279" y="0.0" z="1.59804">
         <rot axis-x="0" axis-y="1" axis-z="0" val="242.825"/>
       </location>
     </component>
   </type>
   <type name="bank21">
     <component type="eightpack">
-      <location x="3.00279" y="0.0" z="1.78754">
+      <location x="3.00525" y="0.0" z="1.789">
         <rot axis-x="0" axis-y="1" axis-z="0" val="239.235"/>
       </location>
     </component>
   </type>
   <type name="bank22">
     <component type="eightpack">
-      <location x="2.88688" y="0.0" z="1.97336">
+      <location x="2.88904" y="0.0" z="1.97484">
         <rot axis-x="0" axis-y="1" axis-z="0" val="235.645"/>
       </location>
     </component>
   </type>
   <type name="bank23">
     <component type="eightpack">
-      <location x="2.7564" y="0.0" z="2.14928">
+      <location x="2.75842" y="0.0" z="2.15085">
         <rot axis-x="0" axis-y="1" axis-z="0" val="232.055"/>
       </location>
     </component>
   </type>
   <type name="bank24">
     <component type="eightpack">
-      <location x="2.6163" y="0.0" z="2.31755">
+      <location x="2.61863" y="0.0" z="2.31963">
         <rot axis-x="0" axis-y="1" axis-z="0" val="228.465"/>
       </location>
     </component>
   </type>
   <type name="bank25">
     <component type="eightpack">
-      <location x="2.46526" y="0.0" z="2.47604">
+      <location x="2.46767" y="0.0" z="2.47847">
         <rot axis-x="0" axis-y="1" axis-z="0" val="224.875"/>
       </location>
     </component>
   </type>
   <type name="bank26">
     <component type="eightpack">
-      <location x="2.30798" y="0.0" z="2.6285">
+      <location x="2.30965" y="0.0" z="2.63041">
         <rot axis-x="0" axis-y="1" axis-z="0" val="221.285"/>
       </location>
     </component>
   </type>
   <type name="bank27">
     <component type="eightpack">
-      <location x="2.13751" y="0.0" z="2.76611">
+      <location x="2.1391" y="0.0" z="2.76818">
         <rot axis-x="0" axis-y="1" axis-z="0" val="217.695"/>
       </location>
     </component>
   </type>
   <type name="bank28">
     <component type="eightpack">
-      <location x="1.96117" y="0.0" z="2.89609">
+      <location x="1.96259" y="0.0" z="2.89819">
         <rot axis-x="0" axis-y="1" axis-z="0" val="214.105"/>
       </location>
     </component>
   </type>
   <type name="bank29">
     <component type="eightpack">
-      <location x="1.77362" y="0.0" z="3.00921">
+      <location x="1.7751" y="0.0" z="3.01173">
         <rot axis-x="0" axis-y="1" axis-z="0" val="210.515"/>
       </location>
     </component>
   </type>
   <type name="bank30">
     <component type="eightpack">
-      <location x="1.58276" y="0.0" z="3.11642">
+      <location x="1.58413" y="0.0" z="3.11912">
         <rot axis-x="0" axis-y="1" axis-z="0" val="206.925"/>
       </location>
     </component>
   </type>
   <type name="bank31">
     <component type="eightpack">
-      <location x="1.38449" y="0.0" z="3.20934">
+      <location x="1.38583" y="0.0" z="3.21247">
         <rot axis-x="0" axis-y="1" axis-z="0" val="203.335"/>
       </location>
     </component>
   </type>
   <type name="bank32">
     <component type="eightpack">
-      <location x="1.18034" y="0.0" z="3.28842">
+      <location x="1.18134" y="0.0" z="3.29122">
         <rot axis-x="0" axis-y="1" axis-z="0" val="199.745"/>
       </location>
     </component>
   </type>
   <type name="bank33">
     <component type="eightpack">
-      <location x="0.971948" y="0.0" z="3.3553">
+      <location x="0.972775" y="0.0" z="3.35816">
         <rot axis-x="0" axis-y="1" axis-z="0" val="196.155"/>
       </location>
     </component>
   </type>
   <type name="bank34">
     <component type="eightpack">
-      <location x="0.760162" y="0.0" z="3.41056">
+      <location x="0.760699" y="0.0" z="3.41296">
         <rot axis-x="0" axis-y="1" axis-z="0" val="192.565"/>
       </location>
     </component>
   </type>
   <type name="bank35">
     <component type="eightpack">
-      <location x="0.544736" y="0.0" z="3.44907">
+      <location x="0.545083" y="0.0" z="3.45126">
         <rot axis-x="0" axis-y="1" axis-z="0" val="188.975"/>
       </location>
     </component>
   </type>
   <type name="bank36">
     <component type="eightpack">
-      <location x="0.32797" y="0.0" z="3.47928">
+      <location x="0.328238" y="0.0" z="3.48213">
         <rot axis-x="0" axis-y="1" axis-z="0" val="185.385"/>
       </location>
     </component>
   </type>
   <type name="bank37">
     <component type="eightpack">
-      <location x="-0.327982" y="0.0" z="3.47941">
+      <location x="-0.328155" y="0.0" z="3.48124">
         <rot axis-x="0" axis-y="1" axis-z="0" val="174.615"/>
       </location>
     </component>
   </type>
   <type name="bank38">
     <component type="eightpack">
-      <location x="-0.545353" y="0.0" z="3.45298">
+      <location x="-0.545596" y="0.0" z="3.45451">
         <rot axis-x="0" axis-y="1" axis-z="0" val="171.025"/>
       </location>
     </component>
   </type>
   <type name="bank39">
     <component type="eightpack">
-      <location x="-0.760304" y="0.0" z="3.41119">
+      <location x="-0.760682" y="0.0" z="3.41289">
         <rot axis-x="0" axis-y="1" axis-z="0" val="167.435"/>
       </location>
     </component>
   </type>
   <type name="bank40">
     <component type="eightpack">
-      <location x="-0.972874" y="0.0" z="3.3585">
+      <location x="-0.973318" y="0.0" z="3.36003">
         <rot axis-x="0" axis-y="1" axis-z="0" val="163.845"/>
       </location>
     </component>
   </type>
   <type name="bank41">
     <component type="eightpack">
-      <location x="-1.18123" y="0.0" z="3.29089">
+      <location x="-1.18185" y="0.0" z="3.29262">
         <rot axis-x="0" axis-y="1" axis-z="0" val="160.255"/>
       </location>
     </component>
   </type>
   <type name="bank42">
     <component type="eightpack">
-      <location x="-1.38572" y="0.0" z="3.2122">
+      <location x="-1.38614" y="0.0" z="3.21318">
         <rot axis-x="0" axis-y="1" axis-z="0" val="156.665"/>
       </location>
     </component>
   </type>
   <type name="bank43">
     <component type="eightpack">
-      <location x="-1.58297" y="0.0" z="3.11684">
+      <location x="-1.58367" y="0.0" z="3.11821">
         <rot axis-x="0" axis-y="1" axis-z="0" val="153.075"/>
       </location>
     </component>
   </type>
   <type name="bank44">
     <component type="eightpack">
-      <location x="-1.77637" y="0.0" z="3.01387">
+      <location x="-1.77701" y="0.0" z="3.01496">
         <rot axis-x="0" axis-y="1" axis-z="0" val="149.485"/>
       </location>
     </component>
   </type>
   <type name="bank45">
     <component type="eightpack">
-      <location x="-1.96071" y="0.0" z="2.89542">
+      <location x="-1.9615" y="0.0" z="2.89658">
         <rot axis-x="0" axis-y="1" axis-z="0" val="145.895"/>
       </location>
     </component>
   </type>
   <type name="bank46">
     <component type="eightpack">
-      <location x="-2.13259" y="0.0" z="2.75975">
+      <location x="-2.13385" y="0.0" z="2.76138">
         <rot axis-x="0" axis-y="1" axis-z="0" val="142.305"/>
       </location>
     </component>
   </type>
   <type name="bank47">
     <component type="eightpack">
-      <location x="-2.2989" y="0.0" z="2.61816">
+      <location x="-2.29987" y="0.0" z="2.61927">
         <rot axis-x="0" axis-y="1" axis-z="0" val="138.715"/>
       </location>
     </component>
   </type>
   <type name="bank48">
     <component type="eightpack">
-      <location x="-2.46047" y="0.0" z="2.47123">
+      <location x="-2.46139" y="0.0" z="2.47215">
         <rot axis-x="0" axis-y="1" axis-z="0" val="135.125"/>
       </location>
     </component>
   </type>
   <type name="bank49">
     <component type="eightpack">
-      <location x="-2.59907" y="0.0" z="2.3023">
+      <location x="-2.60046" y="0.0" z="2.30352">
         <rot axis-x="0" axis-y="1" axis-z="0" val="131.535"/>
       </location>
     </component>
   </type>
   <type name="bank50">
     <component type="eightpack">
-      <location x="-2.74097" y="0.0" z="2.13724">
+      <location x="-2.74181" y="0.0" z="2.1379">
         <rot axis-x="0" axis-y="1" axis-z="0" val="127.945"/>
       </location>
     </component>
diff --git a/instrument/CNCS_Definition_20160713_20170314.xml b/instrument/CNCS_Definition_20160713_20170314.xml
new file mode 100644
index 0000000000000000000000000000000000000000..8552f208c550e33b4cd6a75ddb7e856d0f364a60
--- /dev/null
+++ b/instrument/CNCS_Definition_20160713_20170314.xml
@@ -0,0 +1,729 @@
+<?xml version='1.0' encoding='ASCII'?>
+<instrument xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.mantidproject.org/IDF/1.0" last-modified="2016-07-19 17:43:18.271852" name="CNCS" valid-from="2016-07-14 00:00:00" valid-to="2017-03-14 09:59:59" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 http://schema.mantidproject.org/IDF/1.0/IDFSchema.xsd">
+  <!--Created by Andrei Savici-->
+  <defaults>
+    <length unit="metre"/>
+    <angle unit="degree"/>
+    <reference-frame>
+      <along-beam axis="z"/>
+      <pointing-up axis="y"/>
+      <handedness val="right"/>
+    </reference-frame>
+  </defaults>
+  <!--SOURCE AND SAMPLE POSITION-->
+  <component type="moderator">
+    <location z="-36.262"/>
+  </component>
+  <type is="Source" name="moderator"/>
+  <component type="sample-position">
+    <location x="0.0" y="0.0" z="0.0"/>
+  </component>
+  <type is="SamplePos" name="sample-position"/>
+  <!--MONITORS-->
+  <component idlist="monitors" type="monitors">
+    <location/>
+  </component>
+  <type name="monitors">
+    <component type="monitor">
+      <location name="monitor1" z="-29.949"/>
+      <location name="monitor2" z="-28.706"/>
+      <location name="monitor3" z="-1.416"/>
+    </component>
+  </type>
+  <component idlist="detectors" type="detectors">
+    <location/>
+  </component>
+  <type name="detectors">
+    <component type="bank1">
+      <location/>
+    </component>
+    <component type="bank2">
+      <location/>
+    </component>
+    <component type="bank3">
+      <location/>
+    </component>
+    <component type="bank4">
+      <location/>
+    </component>
+    <component type="bank5">
+      <location/>
+    </component>
+    <component type="bank6">
+      <location/>
+    </component>
+    <component type="bank7">
+      <location/>
+    </component>
+    <component type="bank8">
+      <location/>
+    </component>
+    <component type="bank9">
+      <location/>
+    </component>
+    <component type="bank10">
+      <location/>
+    </component>
+    <component type="bank11">
+      <location/>
+    </component>
+    <component type="bank12">
+      <location/>
+    </component>
+    <component type="bank13">
+      <location/>
+    </component>
+    <component type="bank14">
+      <location/>
+    </component>
+    <component type="bank15">
+      <location/>
+    </component>
+    <component type="bank16">
+      <location/>
+    </component>
+    <component type="bank17">
+      <location/>
+    </component>
+    <component type="bank18">
+      <location/>
+    </component>
+    <component type="bank19">
+      <location/>
+    </component>
+    <component type="bank20">
+      <location/>
+    </component>
+    <component type="bank21">
+      <location/>
+    </component>
+    <component type="bank22">
+      <location/>
+    </component>
+    <component type="bank23">
+      <location/>
+    </component>
+    <component type="bank24">
+      <location/>
+    </component>
+    <component type="bank25">
+      <location/>
+    </component>
+    <component type="bank26">
+      <location/>
+    </component>
+    <component type="bank27">
+      <location/>
+    </component>
+    <component type="bank28">
+      <location/>
+    </component>
+    <component type="bank29">
+      <location/>
+    </component>
+    <component type="bank30">
+      <location/>
+    </component>
+    <component type="bank31">
+      <location/>
+    </component>
+    <component type="bank32">
+      <location/>
+    </component>
+    <component type="bank33">
+      <location/>
+    </component>
+    <component type="bank34">
+      <location/>
+    </component>
+    <component type="bank35">
+      <location/>
+    </component>
+    <component type="bank36">
+      <location/>
+    </component>
+    <component type="bank37">
+      <location/>
+    </component>
+    <component type="bank38">
+      <location/>
+    </component>
+    <component type="bank39">
+      <location/>
+    </component>
+    <component type="bank40">
+      <location/>
+    </component>
+    <component type="bank41">
+      <location/>
+    </component>
+    <component type="bank42">
+      <location/>
+    </component>
+    <component type="bank43">
+      <location/>
+    </component>
+    <component type="bank44">
+      <location/>
+    </component>
+    <component type="bank45">
+      <location/>
+    </component>
+    <component type="bank46">
+      <location/>
+    </component>
+    <component type="bank47">
+      <location/>
+    </component>
+    <component type="bank48">
+      <location/>
+    </component>
+    <component type="bank49">
+      <location/>
+    </component>
+    <component type="bank50">
+      <location/>
+    </component>
+  </type>
+  <type name="bank1">
+    <component type="eightpack">
+      <location x="2.64164" y="0.0" z="-2.29918">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="311.035"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank2">
+    <component type="eightpack">
+      <location x="2.77996" y="0.0" z="-2.1289">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="307.445"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank3">
+    <component type="eightpack">
+      <location x="2.90368" y="0.0" z="-1.94788">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="303.855"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank4">
+    <component type="eightpack">
+      <location x="3.02334" y="0.0" z="-1.76422">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="300.265"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank5">
+    <component type="eightpack">
+      <location x="3.12671" y="0.0" z="-1.57086">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="296.675"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank6">
+    <component type="eightpack">
+      <location x="3.22076" y="0.0" z="-1.37277">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="293.085"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank7">
+    <component type="eightpack">
+      <location x="3.2988" y="0.0" z="-1.16784">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="289.495"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank8">
+    <component type="eightpack">
+      <location x="3.36467" y="0.0" z="-0.95877">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="285.905"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank9">
+    <component type="eightpack">
+      <location x="3.41762" y="0.0" z="-0.746098">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="282.315"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank10">
+    <component type="eightpack">
+      <location x="3.45829" y="0.0" z="-0.530737">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="278.725"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank11">
+    <component type="eightpack">
+      <location x="3.47828" y="0.0" z="-0.31257">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="275.135"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank12">
+    <component type="eightpack">
+      <location x="3.49632" y="0.0" z="-0.0943022">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="271.545"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank13">
+    <component type="eightpack">
+      <location x="3.49657" y="0.0" z="0.124852">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="267.955"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank14">
+    <component type="eightpack">
+      <location x="3.48291" y="0.0" z="0.343651">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="264.365"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank15">
+    <component type="eightpack">
+      <location x="3.45275" y="0.0" z="0.560769">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="260.775"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank16">
+    <component type="eightpack">
+      <location x="3.41025" y="0.0" z="0.775728">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="257.185"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank17">
+    <component type="eightpack">
+      <location x="3.35409" y="0.0" z="0.987481">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="253.595"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank18">
+    <component type="eightpack">
+      <location x="3.287" y="0.0" z="1.19604">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="250.005"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank19">
+    <component type="eightpack">
+      <location x="3.20565" y="0.0" z="1.39952">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="246.415"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank20">
+    <component type="eightpack">
+      <location x="3.11037" y="0.0" z="1.5968">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="242.825"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank21">
+    <component type="eightpack">
+      <location x="3.00279" y="0.0" z="1.78754">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="239.235"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank22">
+    <component type="eightpack">
+      <location x="2.88688" y="0.0" z="1.97336">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="235.645"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank23">
+    <component type="eightpack">
+      <location x="2.7564" y="0.0" z="2.14928">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="232.055"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank24">
+    <component type="eightpack">
+      <location x="2.6163" y="0.0" z="2.31755">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="228.465"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank25">
+    <component type="eightpack">
+      <location x="2.46526" y="0.0" z="2.47604">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="224.875"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank26">
+    <component type="eightpack">
+      <location x="2.30798" y="0.0" z="2.6285">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="221.285"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank27">
+    <component type="eightpack">
+      <location x="2.13751" y="0.0" z="2.76611">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="217.695"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank28">
+    <component type="eightpack">
+      <location x="1.96117" y="0.0" z="2.89609">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="214.105"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank29">
+    <component type="eightpack">
+      <location x="1.77362" y="0.0" z="3.00921">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="210.515"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank30">
+    <component type="eightpack">
+      <location x="1.58276" y="0.0" z="3.11642">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="206.925"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank31">
+    <component type="eightpack">
+      <location x="1.38449" y="0.0" z="3.20934">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="203.335"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank32">
+    <component type="eightpack">
+      <location x="1.18034" y="0.0" z="3.28842">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="199.745"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank33">
+    <component type="eightpack">
+      <location x="0.971948" y="0.0" z="3.3553">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="196.155"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank34">
+    <component type="eightpack">
+      <location x="0.760162" y="0.0" z="3.41056">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="192.565"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank35">
+    <component type="eightpack">
+      <location x="0.544736" y="0.0" z="3.44907">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="188.975"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank36">
+    <component type="eightpack">
+      <location x="0.32797" y="0.0" z="3.47928">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="185.385"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank37">
+    <component type="eightpack">
+      <location x="-0.327982" y="0.0" z="3.47941">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="174.615"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank38">
+    <component type="eightpack">
+      <location x="-0.545353" y="0.0" z="3.45298">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="171.025"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank39">
+    <component type="eightpack">
+      <location x="-0.760304" y="0.0" z="3.41119">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="167.435"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank40">
+    <component type="eightpack">
+      <location x="-0.972874" y="0.0" z="3.3585">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="163.845"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank41">
+    <component type="eightpack">
+      <location x="-1.18123" y="0.0" z="3.29089">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="160.255"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank42">
+    <component type="eightpack">
+      <location x="-1.38572" y="0.0" z="3.2122">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="156.665"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank43">
+    <component type="eightpack">
+      <location x="-1.58297" y="0.0" z="3.11684">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="153.075"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank44">
+    <component type="eightpack">
+      <location x="-1.77637" y="0.0" z="3.01387">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="149.485"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank45">
+    <component type="eightpack">
+      <location x="-1.96071" y="0.0" z="2.89542">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="145.895"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank46">
+    <component type="eightpack">
+      <location x="-2.13259" y="0.0" z="2.75975">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="142.305"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank47">
+    <component type="eightpack">
+      <location x="-2.2989" y="0.0" z="2.61816">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="138.715"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank48">
+    <component type="eightpack">
+      <location x="-2.46047" y="0.0" z="2.47123">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="135.125"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank49">
+    <component type="eightpack">
+      <location x="-2.59907" y="0.0" z="2.3023">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="131.535"/>
+      </location>
+    </component>
+  </type>
+  <type name="bank50">
+    <component type="eightpack">
+      <location x="-2.74097" y="0.0" z="2.13724">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="127.945"/>
+      </location>
+    </component>
+  </type>
+  <!--STANDARD 8-PACK-->
+  <type name="eightpack">
+    <properties/>
+    <component type="tube">
+      <location name="tube1" x="-0.096012"/>
+      <location name="tube2" x="-0.06858"/>
+      <location name="tube3" x="-0.041148"/>
+      <location name="tube4" x="-0.013716"/>
+      <location name="tube5" x="0.013716"/>
+      <location name="tube6" x="0.041148"/>
+      <location name="tube7" x="0.06858"/>
+      <location name="tube8" x="0.096012"/>
+    </component>
+  </type>
+  <!--STANDARD 2m 128 PIXEL TUBE-->
+  <type name="tube" outline="yes">
+    <properties/>
+    <component type="pixel">
+      <location name="pixel1" y="-0.9921875"/>
+      <location name="pixel2" y="-0.9765625"/>
+      <location name="pixel3" y="-0.9609375"/>
+      <location name="pixel4" y="-0.9453125"/>
+      <location name="pixel5" y="-0.9296875"/>
+      <location name="pixel6" y="-0.9140625"/>
+      <location name="pixel7" y="-0.8984375"/>
+      <location name="pixel8" y="-0.8828125"/>
+      <location name="pixel9" y="-0.8671875"/>
+      <location name="pixel10" y="-0.8515625"/>
+      <location name="pixel11" y="-0.8359375"/>
+      <location name="pixel12" y="-0.8203125"/>
+      <location name="pixel13" y="-0.8046875"/>
+      <location name="pixel14" y="-0.7890625"/>
+      <location name="pixel15" y="-0.7734375"/>
+      <location name="pixel16" y="-0.7578125"/>
+      <location name="pixel17" y="-0.7421875"/>
+      <location name="pixel18" y="-0.7265625"/>
+      <location name="pixel19" y="-0.7109375"/>
+      <location name="pixel20" y="-0.6953125"/>
+      <location name="pixel21" y="-0.6796875"/>
+      <location name="pixel22" y="-0.6640625"/>
+      <location name="pixel23" y="-0.6484375"/>
+      <location name="pixel24" y="-0.6328125"/>
+      <location name="pixel25" y="-0.6171875"/>
+      <location name="pixel26" y="-0.6015625"/>
+      <location name="pixel27" y="-0.5859375"/>
+      <location name="pixel28" y="-0.5703125"/>
+      <location name="pixel29" y="-0.5546875"/>
+      <location name="pixel30" y="-0.5390625"/>
+      <location name="pixel31" y="-0.5234375"/>
+      <location name="pixel32" y="-0.5078125"/>
+      <location name="pixel33" y="-0.4921875"/>
+      <location name="pixel34" y="-0.4765625"/>
+      <location name="pixel35" y="-0.4609375"/>
+      <location name="pixel36" y="-0.4453125"/>
+      <location name="pixel37" y="-0.4296875"/>
+      <location name="pixel38" y="-0.4140625"/>
+      <location name="pixel39" y="-0.3984375"/>
+      <location name="pixel40" y="-0.3828125"/>
+      <location name="pixel41" y="-0.3671875"/>
+      <location name="pixel42" y="-0.3515625"/>
+      <location name="pixel43" y="-0.3359375"/>
+      <location name="pixel44" y="-0.3203125"/>
+      <location name="pixel45" y="-0.3046875"/>
+      <location name="pixel46" y="-0.2890625"/>
+      <location name="pixel47" y="-0.2734375"/>
+      <location name="pixel48" y="-0.2578125"/>
+      <location name="pixel49" y="-0.2421875"/>
+      <location name="pixel50" y="-0.2265625"/>
+      <location name="pixel51" y="-0.2109375"/>
+      <location name="pixel52" y="-0.1953125"/>
+      <location name="pixel53" y="-0.1796875"/>
+      <location name="pixel54" y="-0.1640625"/>
+      <location name="pixel55" y="-0.1484375"/>
+      <location name="pixel56" y="-0.1328125"/>
+      <location name="pixel57" y="-0.1171875"/>
+      <location name="pixel58" y="-0.1015625"/>
+      <location name="pixel59" y="-0.0859375"/>
+      <location name="pixel60" y="-0.0703125"/>
+      <location name="pixel61" y="-0.0546875"/>
+      <location name="pixel62" y="-0.0390625"/>
+      <location name="pixel63" y="-0.0234375"/>
+      <location name="pixel64" y="-0.0078125"/>
+      <location name="pixel65" y="0.0078125"/>
+      <location name="pixel66" y="0.0234375"/>
+      <location name="pixel67" y="0.0390625"/>
+      <location name="pixel68" y="0.0546875"/>
+      <location name="pixel69" y="0.0703125"/>
+      <location name="pixel70" y="0.0859375"/>
+      <location name="pixel71" y="0.1015625"/>
+      <location name="pixel72" y="0.1171875"/>
+      <location name="pixel73" y="0.1328125"/>
+      <location name="pixel74" y="0.1484375"/>
+      <location name="pixel75" y="0.1640625"/>
+      <location name="pixel76" y="0.1796875"/>
+      <location name="pixel77" y="0.1953125"/>
+      <location name="pixel78" y="0.2109375"/>
+      <location name="pixel79" y="0.2265625"/>
+      <location name="pixel80" y="0.2421875"/>
+      <location name="pixel81" y="0.2578125"/>
+      <location name="pixel82" y="0.2734375"/>
+      <location name="pixel83" y="0.2890625"/>
+      <location name="pixel84" y="0.3046875"/>
+      <location name="pixel85" y="0.3203125"/>
+      <location name="pixel86" y="0.3359375"/>
+      <location name="pixel87" y="0.3515625"/>
+      <location name="pixel88" y="0.3671875"/>
+      <location name="pixel89" y="0.3828125"/>
+      <location name="pixel90" y="0.3984375"/>
+      <location name="pixel91" y="0.4140625"/>
+      <location name="pixel92" y="0.4296875"/>
+      <location name="pixel93" y="0.4453125"/>
+      <location name="pixel94" y="0.4609375"/>
+      <location name="pixel95" y="0.4765625"/>
+      <location name="pixel96" y="0.4921875"/>
+      <location name="pixel97" y="0.5078125"/>
+      <location name="pixel98" y="0.5234375"/>
+      <location name="pixel99" y="0.5390625"/>
+      <location name="pixel100" y="0.5546875"/>
+      <location name="pixel101" y="0.5703125"/>
+      <location name="pixel102" y="0.5859375"/>
+      <location name="pixel103" y="0.6015625"/>
+      <location name="pixel104" y="0.6171875"/>
+      <location name="pixel105" y="0.6328125"/>
+      <location name="pixel106" y="0.6484375"/>
+      <location name="pixel107" y="0.6640625"/>
+      <location name="pixel108" y="0.6796875"/>
+      <location name="pixel109" y="0.6953125"/>
+      <location name="pixel110" y="0.7109375"/>
+      <location name="pixel111" y="0.7265625"/>
+      <location name="pixel112" y="0.7421875"/>
+      <location name="pixel113" y="0.7578125"/>
+      <location name="pixel114" y="0.7734375"/>
+      <location name="pixel115" y="0.7890625"/>
+      <location name="pixel116" y="0.8046875"/>
+      <location name="pixel117" y="0.8203125"/>
+      <location name="pixel118" y="0.8359375"/>
+      <location name="pixel119" y="0.8515625"/>
+      <location name="pixel120" y="0.8671875"/>
+      <location name="pixel121" y="0.8828125"/>
+      <location name="pixel122" y="0.8984375"/>
+      <location name="pixel123" y="0.9140625"/>
+      <location name="pixel124" y="0.9296875"/>
+      <location name="pixel125" y="0.9453125"/>
+      <location name="pixel126" y="0.9609375"/>
+      <location name="pixel127" y="0.9765625"/>
+      <location name="pixel128" y="0.9921875"/>
+    </component>
+  </type>
+  <!--PIXEL FOR STANDARD 2m 128 PIXEL TUBE-->
+  <type is="detector" name="pixel">
+    <cylinder id="cyl-approx">
+      <centre-of-bottom-base p="0.0" r="0.0" t="0.0"/>
+      <axis x="0.0" y="1.0" z="0.0"/>
+      <radius val="0.0127"/>
+      <height val="0.015625"/>
+    </cylinder>
+    <algebra val="cyl-approx"/>
+  </type>
+  <!--MONITOR SHAPE-->
+  <!--FIXME: Do something real here.-->
+  <type is="monitor" name="monitor">
+    <cylinder id="cyl-approx">
+      <centre-of-bottom-base p="0.0" r="0.0" t="0.0"/>
+      <axis x="0.0" y="0.0" z="1.0"/>
+      <radius val="0.01"/>
+      <height val="0.03"/>
+    </cylinder>
+    <algebra val="cyl-approx"/>
+  </type>
+  <!--DETECTOR IDs-->
+  <idlist idname="detectors">
+    <id end="51199" start="0"/>
+  </idlist>
+  <!--MONITOR IDs-->
+  <idlist idname="monitors">
+    <id val="-1"/>
+    <id val="-2"/>
+    <id val="-3"/>
+  </idlist>
+  <!--DETECTOR PARAMETERS-->
+  <component-link name="detectors">
+    <parameter name="tube_pressure">
+      <value units="atm" val="6.0"/>
+    </parameter>
+    <parameter name="tube_thickness">
+      <value units="metre" val="0.0008"/>
+    </parameter>
+    <parameter name="tube_temperature">
+      <value units="K" val="290.0"/>
+    </parameter>
+  </component-link>
+</instrument>
diff --git a/scripts/SANS/sans/README.md b/scripts/SANS/sans/README.md
index 8b7a21d65a7ae82c7c26fd2999589358d779a71c..4c54bd44384b903ffdc0cfb40139d94011231328 100644
--- a/scripts/SANS/sans/README.md
+++ b/scripts/SANS/sans/README.md
@@ -3,10 +3,19 @@
 The `sans` package contains the elements of the second version of the ISIS SANS reduction, except for Python algorithms
 which can be found in the `WorkflowAlgorithm` section of Mantid's `PythonInterface`.
 
+## `algorithm_detail`
+
+This package contains implementation details of some SANS-specific algorithms which are defined in mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS
+
+
 ## `common`
 
-The elements in the common package include widely used general purpose functions, constants and SANS-wide type definitions.
+The elements in the `common` package include widely used general purpose functions, constants and SANS-wide type definitions.
 
 ## `state`
 
-The elements in the `state` package contain the definition of the reduction configuration and the corresponding builders.
\ No newline at end of file
+The elements in the `state` package contain the definition of the reduction configuration and the corresponding builders.
+
+## `user_file`
+
+The elements of this package are used to parse a SANS user file and to set up a state object from the specified settings.
\ No newline at end of file
diff --git a/scripts/SANS/sans/algorithm_detail/__init__.py b/scripts/SANS/sans/algorithm_detail/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/scripts/SANS/sans/algorithm_detail/calibration.py b/scripts/SANS/sans/algorithm_detail/calibration.py
new file mode 100644
index 0000000000000000000000000000000000000000..d857910ff3371023a383e13a9ea66be95e862d40
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/calibration.py
@@ -0,0 +1,264 @@
+# pylint: disable=invalid-name
+
+""" Handles calibration of SANS workspaces."""
+from os.path import (basename, splitext, isfile)
+from mantid.api import (AnalysisDataService)
+
+from sans.common.file_information import find_full_file_path
+from sans.common.constants import (EMPTY_NAME, CALIBRATION_WORKSPACE_TAG)
+from sans.common.log_tagger import (has_tag, get_tag, set_tag)
+from sans.common.general_functions import create_unmanaged_algorithm
+from sans.common.enums import SANSDataType
+
+
+# -----------------------------
+#  Free functions for Calibration
+# -----------------------------
+def has_calibration_already_been_applied(workspace, full_file_path):
+    """
+    Checks if particular calibration, defined by the file path has been applied to a workspace.
+
+    :param workspace: The workspace which might have been calibrated
+    :param full_file_path: An absolute file path to the calibration file.
+    :return: True if the calibration has been applied else False
+    """
+    has_calibration_applied = False
+    if has_tag(CALIBRATION_WORKSPACE_TAG, workspace):
+        value = get_tag(CALIBRATION_WORKSPACE_TAG, workspace)
+        has_calibration_applied = value == full_file_path
+    return has_calibration_applied
+
+
+def add_calibration_tag_to_workspace(workspace, full_file_path):
+    """
+    Adds a calibration tag to the workspace, which is the file path to the calibration file
+
+    This is used to determine if a calibration (and if the correct calibration) has been applied to a workspace.
+    :param workspace: the workspace to which the calibration tag is added
+    :param full_file_path: the full file path to the calibration file
+    """
+    set_tag(CALIBRATION_WORKSPACE_TAG, full_file_path, workspace)
+
+
+def get_expected_calibration_workspace_name(full_file_path):
+    """
+    Gets the name of the calibration file.
+
+    :param full_file_path: the full file path to the calibration file.
+    :return: the calibration file name.
+    """
+    truncated_path = basename(full_file_path)
+    file_name, _ = splitext(truncated_path)
+    return file_name
+
+
+def get_already_loaded_calibration_workspace(full_file_path):
+    """
+    Gets a calibration workspace from the ADS if it exists.
+
+    :param full_file_path: the full file path to the calibration workspace
+    :return: a handle to the calibration workspace or None
+    """
+    calibration_workspace_name = get_expected_calibration_workspace_name(full_file_path)
+    if AnalysisDataService.doesExist(calibration_workspace_name):
+        output_ws = AnalysisDataService.retrieve(calibration_workspace_name)
+    else:
+        output_ws = None
+    return output_ws
+
+
+def get_calibration_workspace(full_file_path, use_loaded):
+    """
+    Load the calibration workspace from the specified file
+
+    :param full_file_path: Path to the calibration file.
+    :param use_loaded: Allows us to check for the calibration file on the ADS.
+    :return: the calibration workspace.
+    """
+    calibration_workspace = None
+    # Here we can avoid reloading of the calibration workspace
+    if use_loaded:
+        calibration_workspace = get_already_loaded_calibration_workspace(full_file_path)
+
+    if calibration_workspace is None:
+        if not isfile(full_file_path):
+            raise RuntimeError("SANSCalibration: The file for  {0} does not seem to exist".format(full_file_path))
+        loader_name = "LoadNexusProcessed"
+        loader_options = {"Filename": full_file_path,
+                          "OutputWorkspace": "dummy"}
+        loader = create_unmanaged_algorithm(loader_name, **loader_options)
+        loader.execute()
+        calibration_workspace = loader.getProperty("OutputWorkspace").value
+
+    return calibration_workspace
+
+
+def get_cloned_calibration_workspace(calibration_workspace):
+    """
+    Creates a clone from a calibration workspace, in order to consume it later.
+
+    :param calibration_workspace: the calibration workspace which is being cloned
+    :return: a cloned calibration workspace
+    """
+    clone_name = "CloneWorkspace"
+    clone_options = {"InputWorkspace": calibration_workspace,
+                     "OutputWorkspace": EMPTY_NAME}
+    alg = create_unmanaged_algorithm(clone_name, **clone_options)
+    alg.execute()
+    return alg.getProperty("OutputWorkspace").value
+
+
+def get_missing_parameters(calibration_workspace, workspace):
+    """
+    Get a list of missing parameter names which are on the data workspace but not on the calibration workspace.
+
+    :param calibration_workspace: the calibration workspace
+    :param workspace: the data workspace (which is to be calibrated later on).
+    :return: a list of parameters which exist on the data workspace but not on the calibration workspace.
+    """
+    original_parameter_names = workspace.getInstrument().getParameterNames()
+    calibration_workspace_instrument = calibration_workspace.getInstrument()
+    missing_parameter_names = []
+    for parameter in original_parameter_names:
+        if not calibration_workspace_instrument.hasParameter(parameter):
+            missing_parameter_names.append(parameter)
+    return missing_parameter_names
+
+
def apply_missing_parameters(calibration_workspace, workspace, missing_parameters):
    """
    Transfers missing properties from the data workspace to the calibration workspace.

    :param calibration_workspace: the calibration workspace.
    :param workspace: the data workspace.
    :param missing_parameters: a list of missing parameters which exist on the data workspace but not on the calibration
                               workspace.
    :raises RuntimeError: if a parameter has a type other than string, int or double.
    """
    instrument = workspace.getInstrument()
    component_name = instrument.getName()
    set_instrument_parameter_options = {"Workspace": calibration_workspace,
                                        "ComponentName": component_name}
    alg = create_unmanaged_algorithm("SetInstrumentParameter", **set_instrument_parameter_options)

    # For now only string, int and double are handled
    type_options = {"string": "String", "int": "Number", "double": "Number"}
    value_options = {"string": instrument.getStringParameter,
                     "int": instrument.getIntParameter,
                     "double": instrument.getNumberParameter}
    try:
        for missing_parameter in missing_parameters:
            parameter_type = instrument.getParameterType(missing_parameter)
            type_to_save = type_options[parameter_type]
            value = value_options[parameter_type](missing_parameter)

            alg.setProperty("ParameterName", missing_parameter)
            alg.setProperty("ParameterType", type_to_save)
            alg.setProperty("Value", str(value[0]))
            # The algorithm must be executed for every parameter, otherwise nothing is
            # ever written to the calibration workspace.
            alg.execute()
    except KeyError:
        raise RuntimeError("SANSCalibration: An Instrument Parameter File value of unknown type "
                           "was going to be copied. Cannot handle this currently.")
+
+
def calibrate(calibration_workspace, workspace_to_calibrate):
    """
    Performs a calibration. The instrument parameters are copied from the calibration workspace to the data workspace.

    :param calibration_workspace: the calibration workspace
    :param workspace_to_calibrate: the workspace which has the calibration applied to it.
    """
    copy_options = {"InputWorkspace": calibration_workspace,
                    "OutputWorkspace": workspace_to_calibrate}
    copy_alg = create_unmanaged_algorithm("CopyInstrumentParameters", **copy_options)
    copy_alg.execute()
+
+
def add_to_ads(calibration_workspace, full_file_path):
    """
    Add the calibration file to the ADS. The file name is used to publish it to the ADS.

    :param calibration_workspace: the calibration file which is to be published.
    :param full_file_path: the file path to the calibration file.
    """
    name_in_ads = get_expected_calibration_workspace_name(full_file_path)
    AnalysisDataService.addOrReplace(name_in_ads, calibration_workspace)
+
+
def do_apply_calibration(full_file_path, workspaces_to_calibrate, use_loaded, publish_to_ads):
    """
    Applies calibration to a data workspace

    :param full_file_path: the file path to the calibration file.
    :param workspaces_to_calibrate: the workspace which is to be calibrated.
    :param use_loaded: if already loaded calibration workspace is to be used.
    :param publish_to_ads: if calibration workspace is to be added to the ADS.
    """
    # Load (or fetch from the ADS) the calibration workspace
    calibration_workspace = get_calibration_workspace(full_file_path, use_loaded)

    for workspace_group in list(workspaces_to_calibrate.values()):
        # Skip groups which already carry this calibration. Checking the first element is
        # sufficient, even for GroupWorkspaces.
        if has_calibration_already_been_applied(workspace_group[0], full_file_path):
            continue

        # Applying the calibration copies the Parameter Map (PM) from the calibration workspace
        # onto the data workspace. Entries which exist only in the data workspace would be lost,
        # hence the missing entries are first added to the PM of the calibration workspace. Once
        # the calibration has been applied we don't want these extra parameters to cause
        # data-cross-talk between different data sets, so whenever the calibration workspace came
        # from the ADS or will be published to it, a cloned calibration workspace is used instead.
        # The calibration workspace sizes allow for very fast in-memory cloning.
        published = False
        for workspace in workspace_group:
            needs_isolated_copy = use_loaded or publish_to_ads
            calibration_to_use = (get_cloned_calibration_workspace(calibration_workspace)
                                  if needs_isolated_copy else calibration_workspace)
            missing = get_missing_parameters(calibration_to_use, workspace)
            apply_missing_parameters(calibration_to_use, workspace, missing)
            calibrate(calibration_to_use, workspace)

            # Publish the (un-cloned) calibration workspace once, if requested
            if publish_to_ads and not published:
                add_to_ads(calibration_workspace, full_file_path)
                published = True

            # Record on the workspace which calibration was applied
            add_calibration_tag_to_workspace(workspace, full_file_path)
+
+
def apply_calibration(calibration_file_name, workspaces, monitor_workspaces, use_loaded, publish_to_ads):
    """
    Apply (tube) calibration to scatter workspaces and corresponding monitor workspaces.

    :param calibration_file_name: the file name of the calibration file.
    :param workspaces: a map with scatter workspaces for sample and can
    :param monitor_workspaces: a map with scatter monitor workspaces for sample and can
    :param use_loaded: if calibration file from ADS is to be used (if it exists)
    :param publish_to_ads: if the calibration file should be published to the ADS
    :return:
    """
    full_file_path = find_full_file_path(calibration_file_name)

    # Only the sample scatter and the can scatter entries (and their monitors) are calibrated
    scatter_types = (SANSDataType.SampleScatter, SANSDataType.CanScatter)

    # Check for the sample scatter and the can scatter workspaces
    workspaces_to_calibrate = {data_type: workspaces[data_type]
                               for data_type in scatter_types if data_type in workspaces}
    do_apply_calibration(full_file_path, workspaces_to_calibrate, use_loaded, publish_to_ads)

    # Check for the sample scatter and the can scatter workspaces monitors
    workspace_monitors_to_calibrate = {data_type: monitor_workspaces[data_type]
                                       for data_type in scatter_types if data_type in monitor_workspaces}
    do_apply_calibration(full_file_path, workspace_monitors_to_calibrate,
                         use_loaded, publish_to_ads)
diff --git a/scripts/SANS/sans/algorithm_detail/load_data.py b/scripts/SANS/sans/algorithm_detail/load_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab157ed2cd951c794a3b39874a5ff02c07d4f714
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/load_data.py
@@ -0,0 +1,760 @@
+# pylint: disable=too-few-public-methods, invalid-name, fixme, unused-argument
+# pylint: disable=R0922
+""" Implementation for the SANSLoad algorithm
+
+This module contains the loading strategies for the currently supported files. Data can be loaded as the
+SCATTER ENTRY of the reduction or the TRANSMISSION/DIRECT ENTRY of the reduction
+
+SCATTER ENTRY:
+The data of the scatter entry  is loaded as two parts -- the scatter data and the monitors. In the
+old version this was loaded together and then split later on, which was very inefficient.
+
+TRANSMISSION/DIRECT ENTRY:
+The data is expected to be of histogram-type. The data is loaded as a whole. Most reductions will only use the
+monitors but some machines can use a region on the detector as a monitor substitute.
+
+The data can be loaded from:
+
+Standard Nexus-based files directly from the machine:
+    Histogram-based file
+    Event-based file (only for SCATTER ENTRY)
+    Multi-period histogram-based file
+    Multi-period event-based file (only for SCATTER ENTRY)
+    Single period selection of a multi-period histogram-based file
+    Single period selection of a multi-period event-based file (only for SCATTER ENTRY)
+
+Added nexus-based files:
+    Added histogram-based file
+    Added event-based file (only for SCATTER ENTRY)
+    Added multi-period histogram-based file
+    Added multi-period event-based file (only for SCATTER ENTRY)
+    Single period selection of an added multi-period histogram-based file
+    Single period selection of an added multi-period event-based file (only for SCATTER ENTRY)
+
+Standard Raw-based files directly from the machine:
+    Histogram-based file
+    Multi-period histogram-based file
+    Single period selection of a multi-period histogram-based file
+
+
+CACHING:
+Adding to the cache(ADS) is supported for the TubeCalibration file.
+Reading from the cache is supported for all files. This avoids data reloads if the correct file is already in the
+cache.
+"""
+
+from abc import (ABCMeta, abstractmethod)
+from six import with_metaclass
+from mantid.api import (AnalysisDataService)
+from sans.common.file_information import (SANSFileInformationFactory, FileType, get_extension_for_file_type,
+                                          find_full_file_path)
+from sans.common.constants import (EMPTY_NAME, SANS_SUFFIX, TRANS_SUFFIX, MONITOR_SUFFIX, CALIBRATION_WORKSPACE_TAG,
+                                   SANS_FILE_TAG, OUTPUT_WORKSPACE_GROUP, OUTPUT_MONITOR_WORKSPACE,
+                                   OUTPUT_MONITOR_WORKSPACE_GROUP)
+from sans.common.enums import (SANSInstrument, SANSDataType)
+from sans.common.general_functions import (create_unmanaged_algorithm)
+from sans.common.log_tagger import (set_tag, has_tag, get_tag)
+from sans.state.data import (StateData)
+from sans.algorithm_detail.calibration import apply_calibration
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# General functions
+# ----------------------------------------------------------------------------------------------------------------------
def update_file_information(file_information_dict, factory, data_type, file_name):
    """Create a SANSFileInformation object for the given file and store it under its data type."""
    file_information_dict[data_type] = factory.create_sans_file_information(file_name)
+
+
def get_file_and_period_information_from_data(data):
    """
    Get the file information and period information from a StateData object

    :param data: a StateData object
    :return: a map of SANSFileInformation objects and map of period information
    """
    file_information_factory = SANSFileInformationFactory()
    file_information = dict()
    period_information = dict()

    # (data type, file name, selected period) for every entry a StateData object can carry
    entries = [(SANSDataType.SampleScatter, data.sample_scatter, data.sample_scatter_period),
               (SANSDataType.SampleTransmission, data.sample_transmission, data.sample_transmission_period),
               (SANSDataType.SampleDirect, data.sample_direct, data.sample_direct_period),
               (SANSDataType.CanScatter, data.can_scatter, data.can_scatter_period),
               (SANSDataType.CanTransmission, data.can_transmission, data.can_transmission_period),
               (SANSDataType.CanDirect, data.can_direct, data.can_direct_period)]

    for data_type, file_name, period in entries:
        if file_name is not None:
            update_file_information(file_information, file_information_factory, data_type, file_name)
            period_information[data_type] = period
    return file_information, period_information
+
+
def is_transmission_type(to_check):
    """
    Checks if a SANSDataType object is of transmission type.

    Transmission type data are transmission and direct files.
    :param to_check: A SANSDataType object.
    :return: true if the SANSDataType object is a transmission object (transmission or direct) else false.
    """
    transmission_types = (SANSDataType.SampleTransmission, SANSDataType.SampleDirect,
                          SANSDataType.CanTransmission, SANSDataType.CanDirect)
    # Identity comparison is kept on purpose -- data types are singleton-like enum values
    return any(to_check is transmission_type for transmission_type in transmission_types)
+
+
def get_expected_file_tags(file_information, is_transmission, period):
    """
    Creates the expected file tags for SANS workspaces.

    :param file_information: a file information object
    :param is_transmission: if the file information is for a transmission or not
    :param period: the period of interest
    :return: a list of file tags
    """
    suffix_file_type = get_extension_for_file_type(file_information)
    suffix_data = TRANS_SUFFIX if is_transmission else SANS_SUFFIX
    file_path = file_information.get_file_name()
    number_of_periods = file_information.get_number_of_periods()

    # Three possibilities:
    #  1. No period data => 22024_sans_nxs
    #  2. Period data, but wants all => 22025p1_sans_nxs,  22025p2_sans_nxs, ...
    #  3. Period data, select particular period => 22025p3_sans_nxs
    if number_of_periods == 1:
        return ["{0}_{1}_{2}".format(file_path, suffix_data, suffix_file_type)]
    elif number_of_periods > 1 and period is StateData.ALL_PERIODS:
        return ["{0}p{1}_{2}_{3}".format(file_path, period_index, suffix_data, suffix_file_type)
                for period_index in range(1, number_of_periods + 1)]
    elif number_of_periods > 1 and period is not StateData.ALL_PERIODS:
        return ["{0}p{1}_{2}_{3}".format(file_path, period, suffix_data, suffix_file_type)]
    else:
        raise RuntimeError("SANSLoad: Cannot create workspace names.")
+
+
def is_data_transmission_and_event_mode(file_infos):
    """
    Checks if a file is used as a transmission workspace and contains event-mode data. This is not allowed.

    @param file_infos: a dict of DataType vs FileInformation objects
    @return: True if the file setting is bad else False
    """
    return any(is_transmission_type(data_type) and file_info.is_event_mode()
               for data_type, file_info in file_infos.items())
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Caching workspaces
+# ----------------------------------------------------------------------------------------------------------------------
def add_workspaces_to_analysis_data_service(workspaces, workspace_names, is_monitor):
    """
    Adds a list of workspaces to the ADS.

    :param workspaces: list of workspaces
    :param workspace_names: the names under which they are to be published
    :param is_monitor: if the workspace is a monitor or not
    """
    names = [name + MONITOR_SUFFIX for name in workspace_names] if is_monitor else workspace_names
    if len(workspaces) != len(names):
        raise RuntimeError("SANSLoad: There is a mismatch between the generated names and the length of"
                           " the WorkspaceGroup. The workspace has {0} entries and there are {1} "
                           "workspace names".format(len(workspaces), len(names)))

    # Only publish workspaces which are not already present under that name
    for name, workspace in zip(names, workspaces):
        if not AnalysisDataService.doesExist(name):
            AnalysisDataService.addOrReplace(name, workspace)
+
+
def publish_workspaces_to_analysis_data_service(workspaces, workspace_monitors, workspace_names):
    """
    Publish data workspaces and monitor workspaces to the ADS.

    :param workspaces: a list of data workspaces (scatter, transmission, direct)
    :param workspace_monitors:  a list of monitor workspaces
    :param workspace_names: the workspace names
    :return:
    """
    add_workspaces_to_analysis_data_service(workspaces, workspace_names, is_monitor=False)

    # Monitors are optional; publish them only when some were provided
    if workspace_monitors:
        add_workspaces_to_analysis_data_service(workspace_monitors, workspace_names, is_monitor=True)
+
+
def has_loaded_correctly_from_ads(file_information, workspaces, period):
    """
    Checks if the workspaces which were supposed to be loaded from the ADS were loaded.

    This might be important for multi-period data.
    :param file_information: A SANSFileInformation object.
    :param workspaces: a list of workspaces.
    :param period: the selected period
    :return: true if loading from the ADS was successful else false.
    :raises RuntimeError: if the period/workspace combination is inconsistent.
    """
    number_of_workspaces = len(workspaces)
    number_of_periods = file_information.get_number_of_periods()

    # Different cases: single-period, multi-period, multi-period with one period selected
    if number_of_periods == 1:
        is_valid = number_of_workspaces == 1
    elif number_of_periods > 1 and period is not StateData.ALL_PERIODS:
        # A single selected period out of a multi-period file yields exactly one workspace
        is_valid = number_of_workspaces == 1
    elif number_of_periods > 1 and period is StateData.ALL_PERIODS:
        is_valid = number_of_workspaces == number_of_periods
    else:
        raise RuntimeError("SANSLoad: Loading data from the ADS has resulted in a mismatch between the number of "
                           "period information and the number of loaded workspaces")
    return is_valid
+
+
def is_calibration_correct(workspace, calibration_file):
    """
    A workspace matches when it carries no calibration tag at all, or when its
    calibration tag equals the requested calibration file.
    """
    if not has_tag(CALIBRATION_WORKSPACE_TAG, workspace):
        return True
    return get_tag(CALIBRATION_WORKSPACE_TAG, workspace) == calibration_file
+
+
def get_workspaces_from_ads_if_exist(file_tags, full_calibration_file_path, workspaces):
    """
    Retrieves workspaces from the ADS depending on their file tags and calibration file tags which would have been
    set by the sans loading mechanism when they were loaded the first time.

    :param file_tags: a list of file tags which we look for on the workspaces on the ADS
    :param full_calibration_file_path: the calibration file name which we look for on the workspaces on the ADS
    :param workspaces: a list of workspaces which is being updated in this function.
    """
    for workspace_name in AnalysisDataService.getObjectNames():
        candidate = AnalysisDataService.retrieve(workspace_name)
        try:
            if not has_tag(SANS_FILE_TAG, candidate):
                continue
            candidate_tag = get_tag(SANS_FILE_TAG, candidate)
            if candidate_tag in file_tags and is_calibration_correct(candidate, full_calibration_file_path):
                workspaces.append(candidate)
        except RuntimeError:
            # Workspaces without accessible logs are simply skipped
            continue
+
+
def use_cached_workspaces_from_ads(file_information, is_transmission, period, calibration_file_name):
    """
    Use cached workspaces from the ADS. This goes through the workspaces on the ADS and check on their sample logs
    if there is an entry called sans_original_file_name and

    This optimization uses already loaded workspaces from the ADS.
    :param file_information: a SANSFileInformation object.
    :param is_transmission: true if the workspaces are of transmission type
    :param period: the selected period.
    :param calibration_file_name: the name of the calibration file
    :return: a list of workspaces and a list of monitors loaded from the ADS.
    """
    workspaces = []
    workspace_monitors = []

    full_calibration_file_path = find_full_file_path(calibration_file_name)

    # Get the expected sans_original_workspace tag entries and collect matching ADS workspaces
    expected_tags = get_expected_file_tags(file_information, is_transmission, period)
    get_workspaces_from_ads_if_exist(expected_tags, full_calibration_file_path, workspaces)

    if not is_transmission:
        expected_monitor_tags = [tag + MONITOR_SUFFIX for tag in expected_tags]
        get_workspaces_from_ads_if_exist(expected_monitor_tags, full_calibration_file_path, workspace_monitors)

    # Check if all required workspaces could be found on the ADS. For now, we allow only full loading, ie we don't
    # allow picking up some child workspaces of a multi-period file from the ADS and having to load others. Either
    # all are found in the ADS or we have to reload again. If we are loading a scatter workspace and the monitors
    # are not complete, then we have to load the regular workspaces as well
    if not has_loaded_correctly_from_ads(file_information, workspaces, period):
        workspaces = []
    if not is_transmission and not has_loaded_correctly_from_ads(file_information, workspace_monitors, period):
        workspaces = []
        workspace_monitors = []

    return workspaces, workspace_monitors
+
+
def tag_workspaces_with_file_names(workspaces, file_information, is_transmission, period, is_monitor):
    """
    Set a sample log element for the used original file. Note that the calibration file name is set

    :param workspaces: a dict of workspaces
    :param file_information: a SANSFileInformation object
    :param is_transmission: if is transmission.
    :param period: the selected period.
    :param is_monitor: if we are dealing with a monitor
    """
    # Tag each workspace with the original file name it was loaded from (skip already-tagged ones)
    expected_tags = get_expected_file_tags(file_information, is_transmission, period)
    if len(expected_tags) != len(workspaces):
        raise RuntimeError("Issue while tagging the loaded data. The number of tags does not match the number "
                           "of workspaces.")
    for tag, workspace in zip(expected_tags, workspaces):
        if has_tag(SANS_FILE_TAG, workspace):
            continue
        set_tag(SANS_FILE_TAG, tag + MONITOR_SUFFIX if is_monitor else tag, workspace)
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Loading strategies
+# ----------------------------------------------------------------------------------------------------------------------
+
+# -----------------
+# Added data loader
+# -----------------
def run_added_loader(loader, file_information, is_transmission, period):
    """
    Runs the loader for added workspaces.

    This is a complicated matter. The added workspaces can be histogram- or event-based and can consist of
    multi-period data.
    1. Histogram Data: Since we use LoadNexusProcessed we cannot load the monitors separately. We have to make use of
       the algorithm ExtractMonitors in order to split the detector from the monitor
       (if we are dealing with non-transmission data)
    2. Event Data: The added event data and the corresponding monitor data set are stored as two separate units in the
       file. There are several cases to consider
       i. We only have one period, this means that the first entry is the added event data and the second
       entry is the added monitor data
       ii. We have N periods. The first N entries are the added event data and the second N entries are the
       corresponding monitors.
       iii. We have N periods but only want to load period K. We get again only two entries but we need to
       request the kth entry for the added event data and the k + NumPeriods entry for the monitor.

    @param loader: a handle to a preset load algorithm
    @param file_information: the FileInformation object
    @param is_transmission: if the set is a transmission
    @param period: the selected period
    @return: a list of data workspaces and a list of monitor workspaces
    """
    def extract_histogram_data(load_alg, num_periods, selected_period):
        # The loader has already been executed; collect the single output or every group member.
        ws_collection = []
        if num_periods == 1:
            ws_collection.append(load_alg.getProperty("OutputWorkspace").value)
        elif num_periods > 1 and selected_period is not StateData.ALL_PERIODS:
            ws_collection.append(load_alg.getProperty("OutputWorkspace").value)
        else:
            for index in range(1, num_periods + 1):
                ws_collection.append(load_alg.getProperty(OUTPUT_WORKSPACE_GROUP + str(index)).value)
        return ws_collection

    def extract_event_data(load_alg, num_periods, selected_period):
        # The data entries come first in the file, followed by the corresponding monitor entries
        # (see cases i.-iii. in the docstring above).
        ws_collection = []
        ws_monitor_collection = []
        if num_periods == 1 or (num_periods > 1 and selected_period is not StateData.ALL_PERIODS):
            # First get the added event data
            offset = num_periods
            load_alg.setProperty("EntryNumber", selected_period)
            load_alg.execute()
            ws_collection.append(load_alg.getProperty("OutputWorkspace").value)

            # Second get the added monitor data
            load_alg.setProperty("EntryNumber", selected_period + offset)
            load_alg.execute()
            ws_monitor_collection.append(load_alg.getProperty("OutputWorkspace").value)
        else:
            load_alg.execute()
            # Use the num_periods parameter here; the original implementation accidentally
            # closed over the outer number_of_periods variable (same value, but fragile).
            workspace_indices = list(range(1, num_periods + 1))
            monitor_indices = list(range(num_periods + 1, num_periods * 2 + 1))
            for workspace_index, monitor_index in zip(workspace_indices, monitor_indices):
                ws_collection.append(load_alg.getProperty(OUTPUT_WORKSPACE_GROUP + str(workspace_index)).value)
                ws_monitor_collection.append(load_alg.getProperty(OUTPUT_WORKSPACE_GROUP + str(monitor_index)).value)
        return ws_collection, ws_monitor_collection

    workspaces = []
    workspace_monitors = []
    number_of_periods = file_information.get_number_of_periods()

    # Dealing with added event data or histogram data is vastly different, hence we need to separate paths
    if file_information.is_event_mode():
        if is_transmission:
            raise RuntimeError("SANSLoad: Cannot load event-mode data for transmission calculation. Attempted to "
                               "load the file {0} which is event-based as transmission "
                               "data.".format(file_information.get_file_name()))
        workspaces, workspace_monitors = extract_event_data(loader, number_of_periods, period)
    else:
        # In the case of histogram data we need to consider the following.
        # The data is combined with the monitors since we load with LoadNexusProcessed. Hence we need to split the
        # workspace at this point with ExtractMonitors if we are not looking at a transmission run.
        loader.execute()
        workspace_collection = extract_histogram_data(loader, number_of_periods, period)
        if not is_transmission:
            extract_name = "ExtractMonitors"
            extract_options = {"DetectorWorkspace": "dummy1",
                               "MonitorWorkspace": "dummy2"}
            extract_alg = create_unmanaged_algorithm(extract_name, **extract_options)
            for workspace in workspace_collection:
                extract_alg.setProperty("InputWorkspace", workspace)
                extract_alg.execute()
                workspaces.append(extract_alg.getProperty("DetectorWorkspace").value)
                workspace_monitors.append(extract_alg.getProperty("MonitorWorkspace").value)
        else:
            for workspace in workspace_collection:
                workspaces.append(workspace)
    return workspaces, workspace_monitors
+
+
def loader_for_added_isis_nexus(file_information, is_transmission, period):
    """
    Load an added ISIS nexus file.

    @param file_information: a SANSFileInformation object.
    @param is_transmission: if the current file corresponds to transmission data
    @param period: the period to load
    @return: a list of loaded workspaces and a list of monitor workspaces (via run_added_loader)
    """
    # Note: is_transmission IS used -- it is forwarded to run_added_loader below.
    loader_name = "LoadNexusProcessed"
    loader_options = {"Filename": file_information.get_file_name(),
                      "OutputWorkspace": EMPTY_NAME,
                      "LoadHistory": True,
                      "FastMultiPeriod": True}
    if period != StateData.ALL_PERIODS:
        loader_options.update({"EntryNumber": period})
    loader_alg = create_unmanaged_algorithm(loader_name, **loader_options)
    return run_added_loader(loader_alg, file_information, is_transmission, period)
+
+
+# -----------------
+# Nexus data loader
+# -----------------
def extract_multi_period_event_workspace(loader, index, output_workspace_property_name):
    """
    Extract a single workspace from a WorkspaceGroup.

    Note that we need to perform a CloneWorkspace operation because this is the only way to get an individual workspace
    from a WorkspaceGroup. They are extremely "sticky" and using the indexed access will only provide a weak pointer
    which means that we will have a dead reference once the WorkspaceGroup goes out of scope
    @param loader: an executed LoadEventNexus algorithm
    @param index: an index variable into the GroupWorkspace, not that it is offset by 1
    @param output_workspace_property_name: the name of the output workspace property, i.e. OutputWorkspace or
                                           MonitorWorkspace
    @return: a single workspace
    """
    workspace_group = loader.getProperty(output_workspace_property_name).value
    # The caller's index is 1-based; the group itself is 0-based
    target_workspace = workspace_group[index - 1]

    clone_options = {"InputWorkspace": target_workspace,
                     "OutputWorkspace": EMPTY_NAME}
    clone_alg = create_unmanaged_algorithm("CloneWorkspace", **clone_options)
    clone_alg.execute()
    return clone_alg.getProperty("OutputWorkspace").value
+
+
def loader_for_isis_nexus(file_information, is_transmission, period):
    """
    Load an ISIS nexus file with the adequate loading strategy.

    This takes a SANSFileInformation object and selects the correct loader and options.
    :param file_information: a SANSFileInformation object.
    :param is_transmission: if the workspace is a transmission workspace.
    :param period: the period to load.
    :return: a list of loaded workspaces and a list of monitor workspaces (via run_loader).
    """
    loader_options = {"Filename": file_information.get_file_name(),
                      "OutputWorkspace": EMPTY_NAME}
    if file_information.is_event_mode():
        loader_name = "LoadEventNexus"
        # Note that currently we don't have a way to only load one monitor
        loader_options.update({"LoadMonitors": True})
    else:
        loader_name = "LoadISISNexus"
        # For a transmission file we need to load the whole file: it will most of the time only
        # contain monitors anyway, but sometimes the detector is used as a sort of monitor,
        # hence we cannot sort out the monitors. For non-transmission data the monitors are
        # loaded into a separate workspace.
        monitor_mode = "Include" if is_transmission else "Separate"
        loader_options.update({"LoadMonitors": monitor_mode,
                               "EntryNumber": 0})
        if period != StateData.ALL_PERIODS:
            loader_options.update({"EntryNumber": period})
    loader_alg = create_unmanaged_algorithm(loader_name, **loader_options)
    return run_loader(loader_alg, file_information, is_transmission, period)
+
+
+# ---------------
+# Raw data loader
+# ---------------
+def loader_for_raw(file_information, is_transmission, period):
+    """
+    Configures and runs the load algorithm for a raw file.
+
+    :param file_information: a SANSFileInformation object.
+    :param is_transmission: if the workspace is a transmission workspace.
+    :param period: the period to load.
+    :return: a list of workspaces and a list of monitor workspaces (the output of run_loader), with the
+             sample details from the raw file attached to each workspace.
+    """
+    loader_name = "LoadRaw"
+    loader_options = {"Filename": file_information.get_file_name(),
+                      "OutputWorkspace": EMPTY_NAME}
+    loader_options.update({"LoadMonitors": "Separate"})
+    if period != StateData.ALL_PERIODS:
+        loader_options.update({"PeriodList": period})
+    loader_alg = create_unmanaged_algorithm(loader_name, **loader_options)
+    workspaces, monitor_workspaces = run_loader(loader_alg, file_information, is_transmission, period)
+
+    # Add the sample details to the loaded workspace. The single algorithm instance is reused for
+    # every workspace; only the InputWorkspace property is swapped between executions.
+    sample_name = "LoadSampleDetailsFromRaw"
+    sample_options = {"Filename": file_information.get_file_name()}
+    sample_alg = create_unmanaged_algorithm(sample_name, **sample_options)
+
+    for workspace in workspaces:
+        sample_alg.setProperty("InputWorkspace", workspace)
+        sample_alg.execute()
+
+    for monitor_workspace in monitor_workspaces:
+        sample_alg.setProperty("InputWorkspace", monitor_workspace)
+        sample_alg.execute()
+
+    return workspaces, monitor_workspaces
+
+
+# ---------------
+# General
+# ---------------
+def run_loader(loader, file_information, is_transmission, period):
+    """
+    Runs the load algorithm.
+
+    This is a generalization which works for Raw and Nexus files which come directly from the instrument.
+    :param loader: a handle to the selected load algorithm/strategy
+    :param file_information: a SANSFileInformation object
+    :param is_transmission: if the workspace is a transmission workspace.
+    :param period: the selected period.
+    :return: a list of workspaces and a list of monitor workspaces
+    """
+    loader.execute()
+
+    # Get all output workspaces
+    number_of_periods = file_information.get_number_of_periods()
+
+    workspaces = []
+    # Either we have a single-period workspace or we want a single period from a multi-period workspace in which case
+    # we extract it via OutputWorkspace or we want all child workspaces of a multi-period workspace in which case we
+    # need to extract it via OutputWorkspace_1, OutputWorkspace_2, ...
+    # Important note: We cannot just grab the individual periods from the GroupWorkspace since all we get from
+    # the group workspace is a weak pointer, which invalidates our handle as soon as the group workspace goes
+    # out of scope. All of this makes sense for the ADS, but is a pain otherwise.
+    # NOTE(review): "period is not StateData.ALL_PERIODS" compares by identity; this assumes ALL_PERIODS is a
+    # singleton constant (e.g. a class attribute) -- confirm, otherwise use != as elsewhere in this file.
+    if number_of_periods == 1:
+        workspaces.append(loader.getProperty("OutputWorkspace").value)
+    elif number_of_periods > 1 and period is not StateData.ALL_PERIODS:
+        if file_information.is_event_mode():
+            workspaces.append(extract_multi_period_event_workspace(loader, period, "OutputWorkspace"))
+        else:
+            workspaces.append(loader.getProperty("OutputWorkspace").value)
+    else:
+        for index in range(1, number_of_periods + 1):
+            if file_information.is_event_mode():
+                workspaces.append(extract_multi_period_event_workspace(loader, index, "OutputWorkspace"))
+            else:
+                workspaces.append(loader.getProperty(OUTPUT_WORKSPACE_GROUP + str(index)).value)
+
+    # Monitors are only extracted for scatter (non-transmission) data; transmission files contain the
+    # monitors in the data workspace itself (see loader_for_isis_nexus).
+    workspace_monitors = []
+    if not is_transmission:
+        if number_of_periods == 1:
+            workspace_monitors.append(loader.getProperty(OUTPUT_MONITOR_WORKSPACE).value)
+        elif number_of_periods > 1 and period is not StateData.ALL_PERIODS:
+            if file_information.is_event_mode():
+                workspace_monitors.append(extract_multi_period_event_workspace(loader, period,
+                                                                               OUTPUT_MONITOR_WORKSPACE))
+            else:
+                workspace_monitors.append(loader.getProperty(OUTPUT_MONITOR_WORKSPACE).value)
+        else:
+            for index in range(1, number_of_periods + 1):
+                if file_information.is_event_mode():
+                    workspace_monitors.append(
+                        extract_multi_period_event_workspace(loader, index, OUTPUT_MONITOR_WORKSPACE))
+                else:
+                    workspace_monitors.append(loader.getProperty(OUTPUT_MONITOR_WORKSPACE_GROUP + str(index)).value)
+    # Tag workspaces with their originating file names so cached lookups can identify them later.
+    if workspaces:
+        tag_workspaces_with_file_names(workspaces, file_information, is_transmission, period, is_monitor=False)
+    if workspace_monitors:
+        tag_workspaces_with_file_names(workspace_monitors, file_information, is_transmission, period, is_monitor=True)
+    return workspaces, workspace_monitors
+
+
+def get_loader_strategy(file_information):
+    """
+    Selects a loading strategy depending on the file type and if we are dealing with a transmission
+
+    :param file_information: a SANSFileInformation object.
+    :return: a handle to the correct loading function/strategy.
+    :raises RuntimeError: if the file type is not one of the supported ISIS types.
+    """
+    if file_information.get_type() == FileType.ISISNexus:
+        loader = loader_for_isis_nexus
+    elif file_information.get_type() == FileType.ISISRaw:
+        loader = loader_for_raw
+    elif file_information.get_type() == FileType.ISISNexusAdded:
+        # Added (already processed) data has its own strategy, defined elsewhere in this module.
+        loader = loader_for_added_isis_nexus
+    else:
+        raise RuntimeError("SANSLoad: Cannot load SANS file of type {0}".format(str(file_information.get_type())))
+    return loader
+
+
+def load_isis(data_type, file_information, period, use_cached, calibration_file_name):
+    """
+    Loads workspaces according a SANSFileInformation object for ISIS.
+
+    This function will select the correct loading strategy based on the information provided in the file_information.
+    :param data_type: the data type, i.e. sample scatter, sample transmission, etc.
+    :param file_information: a SANSFileInformation object.
+    :param period: the selected period.
+    :param use_cached: use cached workspaces on the ADS.
+    :param calibration_file_name: the calibration file name. Note that this is only used for cached loading of data
+                                  workspaces and not for loading of calibration files. We just want to make sure that
+                                  the potentially cached data has had the correct calibration file applied to it.
+    :return: a SANSDataType-Workspace map for data workspaces and a SANSDataType-Workspace map for monitor workspaces
+    """
+    # Both entries are lists of workspaces (possibly several for multi-period data).
+    workspace = []
+    workspace_monitor = []
+
+    is_transmission = is_transmission_type(data_type)
+
+    # Make potentially use of loaded workspaces. For now we can only identify them by their name
+    if use_cached:
+        workspace, workspace_monitor = use_cached_workspaces_from_ads(file_information, is_transmission, period,
+                                                                      calibration_file_name)
+
+    # Load the workspace if required. We need to load it if there is no workspace loaded from the cache or, in the case
+    # of scatter, ie. non-trans, there is no monitor workspace. There are several ways to load the data
+    if len(workspace) == 0 or (len(workspace_monitor) == 0 and not is_transmission):
+        loader = get_loader_strategy(file_information)
+        workspace, workspace_monitor = loader(file_information, is_transmission, period)
+
+    # Associate the data type with the workspace
+    workspace_pack = {data_type: workspace}
+    # The monitor pack is None (not an empty map) when there are no monitors, e.g. for transmission data.
+    workspace_monitor_pack = {data_type: workspace_monitor} if len(workspace_monitor) > 0 else None
+
+    return workspace_pack, workspace_monitor_pack
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Load classes
+# ----------------------------------------------------------------------------------------------------------------------
+class SANSLoadData(with_metaclass(ABCMeta, object)):
+    """ Base class for all SANSLoad implementations."""
+
+    @abstractmethod
+    def do_execute(self, data_info, use_cached, publish_to_ads, progress):
+        pass
+
+    def execute(self, data_info, use_cached, publish_to_ads, progress):
+        SANSLoadData._validate(data_info)
+        return self.do_execute(data_info, use_cached, publish_to_ads, progress)
+
+    @staticmethod
+    def _validate(data_info):
+        if not isinstance(data_info, StateData):
+            raise ValueError("SANSLoad: The provided state information is of the wrong type. It must be"
+                             " of type StateData,but was {0}".format(str(type(data_info))))
+        data_info.validate()
+
+
+class SANSLoadDataISIS(SANSLoadData):
+    """Load implementation of SANSLoad for ISIS data"""
+    def do_execute(self, data_info, use_cached, publish_to_ads, progress):
+        # Get all entries from the state file
+        file_infos, period_infos = get_file_and_period_information_from_data(data_info)
+
+        # Several important remarks regarding the loading
+        # 1. Scatter files are loaded as with monitors and the data in two separate workspaces.
+        # 2. Transmission files are loaded entirely. They cannot be event-mode
+        # 3. Added data is handled differently because it is already processed data.
+
+        # Check that the transmission data is not event mode
+        if is_data_transmission_and_event_mode(file_infos):
+            raise RuntimeError("SANSLoad: You have provided an event-type file for a transmission workspace. "
+                               "Only histogram-type files are supported.")
+
+        # SANSDataType -> workspace-list maps, accumulated over all file entries below.
+        workspaces = {}
+        workspace_monitors = {}
+
+        if data_info.calibration is not None:
+            calibration_file = data_info.calibration
+        else:
+            calibration_file = ""
+
+        for key, value in list(file_infos.items()):
+            # Loading
+            report_message = "Loading {0}".format(SANSDataType.to_string(key))
+            progress.report(report_message)
+
+            workspace_pack, workspace_monitors_pack = load_isis(key, value, period_infos[key],
+                                                                use_cached, calibration_file)
+
+            # Add them to the already loaded workspaces
+            workspaces.update(workspace_pack)
+            # The monitor pack is None for transmission data (see load_isis), hence the guard.
+            if workspace_monitors_pack is not None:
+                workspace_monitors.update(workspace_monitors_pack)
+
+        # Apply the calibration if any exists.
+        if data_info.calibration:
+            report_message = "Applying calibration."
+            progress.report(report_message)
+            apply_calibration(calibration_file, workspaces, workspace_monitors, use_cached, publish_to_ads)
+
+        return workspaces, workspace_monitors
+
+
+class SANSLoadDataFactory(object):
+    """ A factory for SANSLoadData."""
+    def __init__(self):
+        super(SANSLoadDataFactory, self).__init__()
+
+    @staticmethod
+    def _get_instrument_type(state):
+        """Determine the instrument from the sample scatter entry of the state's data sub-state."""
+        data = state.data
+        # Get the correct loader based on the sample scatter file from the data sub state
+        data.validate()
+        file_info, _ = get_file_and_period_information_from_data(data)
+        sample_scatter_info = file_info[SANSDataType.SampleScatter]
+        return sample_scatter_info.get_instrument()
+
+    @staticmethod
+    def create_loader(state):
+        """
+        Provides the appropriate loader.
+
+        :param state: a SANSState object
+        :return: the corresponding loader
+        :raises RuntimeError: if the instrument is not one of the supported ISIS SANS instruments.
+        """
+        instrument_type = SANSLoadDataFactory._get_instrument_type(state)
+        # Identity comparison is safe here since SANSInstrument members are module-level singletons.
+        if instrument_type is SANSInstrument.LARMOR or instrument_type is SANSInstrument.LOQ or\
+           instrument_type is SANSInstrument.SANS2D:
+            loader = SANSLoadDataISIS()
+        else:
+            raise RuntimeError("SANSLoaderFactory: Other instruments are not implemented yet.")
+        return loader
diff --git a/scripts/SANS/sans/algorithm_detail/move_workspaces.py b/scripts/SANS/sans/algorithm_detail/move_workspaces.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc9dcdc11cd247e3a50b5ec06f411d773fd1b6d1
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/move_workspaces.py
@@ -0,0 +1,671 @@
+# pylint: disable=too-few-public-methods, invalid-name
+
+import math
+from mantid.api import MatrixWorkspace
+from abc import (ABCMeta, abstractmethod)
+from six import with_metaclass
+from sans.state.move import StateMove
+from sans.common.enums import (SANSInstrument, CanonicalCoordinates, DetectorType)
+from sans.common.general_functions import (create_unmanaged_algorithm, get_single_valued_logs_from_workspace,
+                                           quaternion_to_angle_and_axis)
+
+
+# -------------------------------------------------
+# Free functions
+# -------------------------------------------------
+def move_component(workspace, offsets, component_to_move):
+    """
+    Move an individual component on a workspace
+
+    :param workspace: the workspace containing the component which is to be moved.
+    :param offsets: a Coordinate vs. Value map of offsets.
+    :param component_to_move: the name of a component on the instrument. This name must exist
+                              on the instrument.
+    :return:
+    :raises RuntimeError: if an offset key is not one of the canonical X/Y/Z coordinates.
+    """
+    move_name = "MoveInstrumentComponent"
+    # RelativePosition=True: the offsets are applied on top of the component's current position.
+    move_options = {"Workspace": workspace,
+                    "ComponentName": component_to_move,
+                    "RelativePosition": True}
+    for key, value in list(offsets.items()):
+        if key is CanonicalCoordinates.X:
+            move_options.update({"X": value})
+        elif key is CanonicalCoordinates.Y:
+            move_options.update({"Y": value})
+        elif key is CanonicalCoordinates.Z:
+            move_options.update({"Z": value})
+        else:
+            raise RuntimeError("SANSMove: Trying to move the components along an unknown direction. "
+                               "See here: {0}".format(str(component_to_move)))
+    alg = create_unmanaged_algorithm(move_name, **move_options)
+    alg.execute()
+
+
+def rotate_component(workspace, angle, direction, component_to_rotate):
+    """
+    Rotate a component on a workspace.
+
+    :param workspace: the workspace which contains the component which is to be rotated.
+    :param angle: the angle by which it is to be rotated in degrees.
+    :param direction: the rotation direction. This is a unit vector encoded as a Coordinate vs Value map.
+    :param component_to_rotate: name of the component which is to be rotated
+    :return:
+    :raises RuntimeError: if a direction key is not one of the canonical X/Y/Z coordinates.
+    """
+    rotate_name = "RotateInstrumentComponent"
+    # NOTE(review): RelativeRotation is passed as the string "1" here but as a bool in move_component;
+    # presumably the algorithm property coerces both to true -- confirm and unify.
+    rotate_options = {"Workspace": workspace,
+                      "ComponentName": component_to_rotate,
+                      "RelativeRotation": "1"}
+    for key, value in list(direction.items()):
+        if key is CanonicalCoordinates.X:
+            rotate_options.update({"X": value})
+        elif key is CanonicalCoordinates.Y:
+            rotate_options.update({"Y": value})
+        elif key is CanonicalCoordinates.Z:
+            rotate_options.update({"Z": value})
+        else:
+            raise RuntimeError("SANSMove: Trying to rotate the components along an unknown direction. "
+                               "See here: {0}".format(str(component_to_rotate)))
+    rotate_options.update({"Angle": angle})
+    alg = create_unmanaged_algorithm(rotate_name, **rotate_options)
+    alg.execute()
+
+
+def move_sample_holder(workspace, sample_offset, sample_offset_direction):
+    """
+    Moves the sample holder by specified amount.
+
+    :param workspace: the workspace which will have its sample holder moved.
+    :param sample_offset: the offset value
+    :param sample_offset_direction: the offset direction (can only be currently along a canonical direction)
+    """
+    offset = {sample_offset_direction: sample_offset}
+    # The component name is hard-coded; the same name is used in set_components_to_original_for_isis.
+    move_component(workspace, offset, 'some-sample-holder')
+
+
+def apply_standard_displacement(move_info, workspace, coordinates, component):
+    """
+    Applies a standard displacement to a workspace.
+
+    A standard displacement means here that it is a displacement along X and Y since Z is normally the direction
+    of the beam.
+    :param move_info: a StateMove object.
+    :param workspace: the workspace which is to moved.
+    :param coordinates: a list of coordinates by how much to move the component on the workspace. Note that currently
+                        only the first two entries are used.
+    :param component: the component which is to be moved.
+    """
+    # Get the detector name
+    component_name = move_info.detectors[component].detector_name
+    # Offset: coordinates[0] maps to X and coordinates[1] to Y; any further entries are ignored.
+    offset = {CanonicalCoordinates.X: coordinates[0],
+              CanonicalCoordinates.Y: coordinates[1]}
+    move_component(workspace, offset, component_name)
+
+
+def is_zero_axis(axis):
+    """
+    Checks if the axis is potentially a null vector and hence not a useful axis at all.
+
+    :param axis: the axis to check.
+    :return: true if the axis is a null vector (or close to it) else false.
+    """
+    # Sum of absolute components compared against a fixed tolerance (L1 norm < 1e-4 counts as zero).
+    total_value = 0.0
+    for entry in axis:
+        total_value += abs(entry)
+    return total_value < 1e-4
+
+
+def set_selected_components_to_original_position(workspace, component_names):
+    """
+    Sets a component to its original (non-moved) position, i.e. the position one obtains when using standard loading.
+
+    The way we know the original position/rotation is the base instrument. We look at the difference between the
+    instrument and the base instrument and perform a reverse operation.
+
+    :param workspace: the workspace which will have the move applied to it.
+    :param component_names: the name of the component which is to be moved.
+    """
+    # First get the original rotation and position of the unaltered instrument components. This information
+    # is stored in the base instrument
+    instrument = workspace.getInstrument()
+    base_instrument = instrument.getBaseInstrument()
+
+    # Get the original position and rotation
+    for component_name in component_names:
+        base_component = base_instrument.getComponentByName(component_name)
+        moved_component = instrument.getComponentByName(component_name)
+
+        # It can be that monitors are already defined in the IDF but they cannot be found on the workspace. They
+        # are buffer monitor names which the experiments might use in the future. Hence we need to check if a component
+        # is zero at this point
+        if base_component is None or moved_component is None:
+            continue
+
+        base_position = base_component.getPos()
+        base_rotation = base_component.getRotation()
+
+        moved_position = moved_component.getPos()
+        moved_rotation = moved_component.getRotation()
+
+        # NOTE(review): move_alg is reset to None on every loop iteration, so the else branch below
+        # (algorithm reuse via setProperty) is unreachable dead code. If reuse was intended, the
+        # "move_alg = None" initialisation should be hoisted above the for loop.
+        move_alg = None
+        if base_position != moved_position:
+            if move_alg is None:
+                move_alg_name = "MoveInstrumentComponent"
+                # RelativePosition=False: set the absolute base position rather than an offset.
+                move_alg_options = {"Workspace": workspace,
+                                    "RelativePosition": False,
+                                    "ComponentName": component_name,
+                                    "X": base_position[0],
+                                    "Y": base_position[1],
+                                    "Z": base_position[2]}
+                move_alg = create_unmanaged_algorithm(move_alg_name, **move_alg_options)
+            else:
+                move_alg.setProperty("ComponentName", component_name)
+                move_alg.setProperty("X", base_position[0])
+                move_alg.setProperty("Y", base_position[1])
+                move_alg.setProperty("Z", base_position[2])
+            move_alg.execute()
+
+        # NOTE(review): same dead-code pattern as move_alg above -- rot_alg is reset each iteration.
+        rot_alg = None
+        if base_rotation != moved_rotation:
+            # NOTE(review): this rotates by -angle of the *moved* rotation, i.e. it undoes the full current
+            # rotation rather than restoring base_rotation; that is only equivalent when the base rotation
+            # is the identity -- confirm this assumption holds for the ISIS IDFs.
+            angle, axis = quaternion_to_angle_and_axis(moved_rotation)
+            # If the axis is a zero vector then, we continue, there is nothing to rotate
+            if not is_zero_axis(axis):
+                if rot_alg is None:
+                    rot_alg_name = "RotateInstrumentComponent"
+                    rot_alg_options = {"Workspace": workspace,
+                                       "RelativeRotation": True,
+                                       "ComponentName": component_name,
+                                       "X": axis[0],
+                                       "Y": axis[1],
+                                       "Z": axis[2],
+                                       "Angle": -1*angle}
+                    rot_alg = create_unmanaged_algorithm(rot_alg_name, **rot_alg_options)
+                else:
+                    rot_alg.setProperty("ComponentName", component_name)
+                    rot_alg.setProperty("X", axis[0])
+                    rot_alg.setProperty("Y", axis[1])
+                    rot_alg.setProperty("Z", axis[2])
+                    rot_alg.setProperty("Angle", -1*angle)
+                rot_alg.execute()
+
+
+def set_components_to_original_for_isis(move_info, workspace, component):
+    """
+    This function resets the components for ISIS instruments. These are normally HAB, LAB, the monitors and
+    the sample holder
+
+    :param move_info: a StateMove object.
+    :param workspace: the workspace which is being reset.
+    :param component: the component which is being reset on the workspace. If this is not specified, then
+                      everything is being reset.
+    """
+    # We reset the HAB, the LAB, the sample holder and monitor 4
+    if not component:
+        hab_name = move_info.detectors[DetectorType.to_string(DetectorType.HAB)].detector_name
+        lab_name = move_info.detectors[DetectorType.to_string(DetectorType.LAB)].detector_name
+        component_names = list(move_info.monitor_names.values())
+        component_names.append(hab_name)
+        component_names.append(lab_name)
+        # Same hard-coded sample holder name as used in move_sample_holder.
+        component_names.append("some-sample-holder")
+    else:
+        component_names = [component]
+
+    # We also want to check the sample holder
+    set_selected_components_to_original_position(workspace, component_names)
+
+
+def get_detector_component(move_info, component):
+    """
+    Gets the detector component on the workspace
+
+    :param move_info: a StateMove object.
+    :param component: A component name, ie a detector name or a short detector name as specified in the IPF.
+    :return: the key entry for detectors on the StateMove object which corresponds to the input component.
+             If no detector matches (or component is falsy), the input component is returned unchanged.
+    """
+    component_selection = component
+    if component:
+        # No break on match: if several detectors matched, the last match would win.
+        for detector_key in list(move_info.detectors.keys()):
+            is_name = component == move_info.detectors[detector_key].detector_name
+            is_name_short = component == move_info.detectors[detector_key].detector_name_short
+            if is_name or is_name_short:
+                component_selection = detector_key
+    return component_selection
+
+
+# -------------------------------------------------
+# Move classes
+# -------------------------------------------------
+class SANSMove(with_metaclass(ABCMeta, object)):
+    """Abstract base class for all SANS move strategies (validate, then delegate to do_* hooks)."""
+    def __init__(self):
+        super(SANSMove, self).__init__()
+
+    @abstractmethod
+    def do_move_initial(self, move_info, workspace, coordinates, component):
+        # Perform the initial, instrument-specific move of a component.
+        pass
+
+    @abstractmethod
+    def do_move_with_elementary_displacement(self, move_info, workspace, coordinates, component):
+        # Apply a simple relative displacement to a component.
+        pass
+
+    @abstractmethod
+    def do_set_to_zero(self, move_info, workspace, component):
+        # Reset a component (or all components) to its original position.
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def is_correct(instrument_type, run_number, **kwargs):
+        # Return whether this mover is applicable to the given instrument/run.
+        pass
+
+    def move_initial(self, move_info, workspace, coordinates, component):
+        """Validate inputs, resolve the detector key for the component, then run the initial move."""
+        SANSMove._validate(move_info, workspace, coordinates, component)
+        component_selection = get_detector_component(move_info, component)
+        return self.do_move_initial(move_info, workspace, coordinates, component_selection)
+
+    def move_with_elementary_displacement(self, move_info, workspace, coordinates, component):
+        """Validate inputs, resolve the detector key for the component, then apply the displacement."""
+        SANSMove._validate(move_info, workspace, coordinates, component)
+        component_selection = get_detector_component(move_info, component)
+        return self.do_move_with_elementary_displacement(move_info, workspace, coordinates, component_selection)
+
+    def set_to_zero(self, move_info, workspace, component):
+        """Validate inputs (no coordinates required here), then reset the component(s)."""
+        SANSMove._validate_set_to_zero(move_info, workspace, component)
+        return self.do_set_to_zero(move_info, workspace, component)
+
+    @staticmethod
+    def _validate_component(move_info, component):
+        # An empty/None component means "all components" and is always valid.
+        if component is not None and len(component) != 0:
+            found_name = False
+            for detector_keys in list(move_info.detectors.keys()):
+                is_name = component == move_info.detectors[detector_keys].detector_name
+                is_name_short = component == move_info.detectors[detector_keys].detector_name_short
+                if is_name or is_name_short:
+                    found_name = True
+                    break
+            if not found_name:
+                raise ValueError("SANSMove: The component to be moved {0} cannot be found in the"
+                                 " state information of type {1}".format(str(component), str(type(move_info))))
+
+    @staticmethod
+    def _validate_workspace(workspace):
+        if not isinstance(workspace, MatrixWorkspace):
+            raise ValueError("SANSMove: The input workspace has to be a MatrixWorkspace")
+
+    @staticmethod
+    def _validate_state(move_info):
+        if not isinstance(move_info, StateMove):
+            raise ValueError("SANSMove: The provided state information is of the wrong type. It must be"
+                             " of type StateMove, but was {0}".format(str(type(move_info))))
+
+    @staticmethod
+    def _validate(move_info, workspace, coordinates, component):
+        SANSMove._validate_state(move_info)
+        if coordinates is None or len(coordinates) == 0:
+            raise ValueError("SANSMove: The provided coordinates cannot be empty.")
+        SANSMove._validate_workspace(workspace)
+        SANSMove._validate_component(move_info, component)
+        move_info.validate()
+
+    @staticmethod
+    def _validate_set_to_zero(move_info, workspace, component):
+        # Same as _validate but without the coordinates check, since set_to_zero takes none.
+        SANSMove._validate_state(move_info)
+        SANSMove._validate_workspace(workspace)
+        SANSMove._validate_component(move_info, component)
+        move_info.validate()
+
+
+class SANSMoveSANS2D(SANSMove):
+    def __init__(self):
+        # No SANS2D-specific state; just chain up to the abstract base.
+        super(SANSMoveSANS2D, self).__init__()
+
+    @staticmethod
+    def perform_x_and_y_tilts(workspace, detector):
+        """Apply the detector's x and y tilt corrections (rotations about Z and X respectively)."""
+        detector_name = detector.detector_name
+        # Perform rotation a y tilt correction. This tilt rotates around the instrument axis / around the X-AXIS!
+        y_tilt_correction = detector.y_tilt_correction
+        if y_tilt_correction != 0.0:
+            y_tilt_correction_direction = {CanonicalCoordinates.X: 1.0,
+                                           CanonicalCoordinates.Y: 0.0,
+                                           CanonicalCoordinates.Z: 0.0}
+            rotate_component(workspace, y_tilt_correction, y_tilt_correction_direction, detector_name)
+
+        # Perform rotation a x tilt correction. This tilt rotates around the instrument axis / around the Z-AXIS!
+        x_tilt_correction = detector.x_tilt_correction
+        if x_tilt_correction != 0.0:
+            x_tilt_correction_direction = {CanonicalCoordinates.X: 0.0,
+                                           CanonicalCoordinates.Y: 0.0,
+                                           CanonicalCoordinates.Z: 1.0}
+            rotate_component(workspace, x_tilt_correction, x_tilt_correction_direction, detector_name)
+
+# pylint: disable=too-many-locals
+    @staticmethod
+    def _move_high_angle_bank(move_info, workspace, coordinates):
+        """Position the SANS2D high angle bank (HAB) from run logs, state corrections and beam coordinates."""
+        # Get FRONT_DET_X, FRONT_DET_Z, FRONT_DET_ROT, REAR_DET_X
+        hab_detector_x_tag = "Front_Det_X"
+        hab_detector_z_tag = "Front_Det_Z"
+        hab_detector_rotation_tag = "Front_Det_ROT"
+        lab_detector_x_tag = "Rear_Det_X"
+
+        log_names = [hab_detector_x_tag, hab_detector_z_tag, hab_detector_rotation_tag, lab_detector_x_tag]
+        log_types = [float, float, float, float]
+        log_values = get_single_valued_logs_from_workspace(workspace, log_names, log_types,
+                                                           convert_from_millimeter_to_meter=True)
+
+        # For each value: prefer the run log; fall back to the state default when the log is absent.
+        hab_detector_x = move_info.hab_detector_x \
+            if log_values[hab_detector_x_tag] is None else log_values[hab_detector_x_tag]
+
+        hab_detector_z = move_info.hab_detector_z \
+            if log_values[hab_detector_z_tag] is None else log_values[hab_detector_z_tag]
+
+        hab_detector_rotation = move_info.hab_detector_rotation \
+            if log_values[hab_detector_rotation_tag] is None else log_values[hab_detector_rotation_tag]
+        # When we read in the FRONT_Det_ROT tag, we divided by 1000. (since we converted the others to meter)
+        if log_values[hab_detector_rotation_tag] is not None:
+            hab_detector_rotation *= 1000.
+
+        lab_detector_x = move_info.lab_detector_x \
+            if log_values[lab_detector_x_tag] is None else log_values[lab_detector_x_tag]
+
+        # Fixed values
+        hab_detector_radius = move_info.hab_detector_radius
+        hab_detector_default_x_m = move_info.hab_detector_default_x_m
+        hab_detector_default_sd_m = move_info.hab_detector_default_sd_m
+
+        # Detector and name
+        hab_detector = move_info.detectors[DetectorType.to_string(DetectorType.HAB)]
+        detector_name = hab_detector.detector_name
+
+        # Perform x and y tilt
+        SANSMoveSANS2D.perform_x_and_y_tilts(workspace, hab_detector)
+
+        # Perform rotation of around the Y-AXIS. This is more complicated as the high angle bank detector is
+        # offset.
+        rotation_angle = (-hab_detector_rotation - hab_detector.rotation_correction)
+        rotation_direction = {CanonicalCoordinates.X: 0.0,
+                              CanonicalCoordinates.Y: 1.0,
+                              CanonicalCoordinates.Z: 0.0}
+        rotate_component(workspace, rotation_angle, rotation_direction, detector_name)
+
+        # Add translational corrections
+        x = coordinates[0]
+        y = coordinates[1]
+        lab_detector = move_info.detectors[DetectorType.to_string(DetectorType.LAB)]
+        rotation_in_radians = math.pi * (hab_detector_rotation + hab_detector.rotation_correction)/180.
+
+        # The x/z shifts account for the arc the bank sweeps when rotated (radius and side corrections).
+        x_shift = ((lab_detector_x + lab_detector.x_translation_correction -
+                    hab_detector_x - hab_detector.x_translation_correction -
+                    hab_detector.side_correction*(1.0 - math.cos(rotation_in_radians)) +
+                    (hab_detector_radius + hab_detector.radius_correction)*(math.sin(rotation_in_radians))) -
+                   hab_detector_default_x_m - x)
+        y_shift = hab_detector.y_translation_correction - y
+        z_shift = (hab_detector_z + hab_detector.z_translation_correction +
+                   (hab_detector_radius + hab_detector.radius_correction) * (1.0 - math.cos(rotation_in_radians)) -
+                   hab_detector.side_correction * math.sin(rotation_in_radians)) - hab_detector_default_sd_m
+
+        offset = {CanonicalCoordinates.X: x_shift,
+                  CanonicalCoordinates.Y: y_shift,
+                  CanonicalCoordinates.Z: z_shift}
+        move_component(workspace, offset, detector_name)
+
+    @staticmethod
+    def _move_low_angle_bank(move_info, workspace, coordinates):
+        # The z position of the low angle bank comes from the "Rear_Det_Z" log (converted from mm to m) if present
+        lab_detector_z_tag = "Rear_Det_Z"
+
+        log_names = [lab_detector_z_tag]
+        log_types = [float]
+        log_values = get_single_valued_logs_from_workspace(workspace, log_names, log_types,
+                                                           convert_from_millimeter_to_meter=True)
+
+        lab_detector_z = move_info.lab_detector_z \
+            if log_values[lab_detector_z_tag] is None else log_values[lab_detector_z_tag]
+
+        # Apply the x and y tilt corrections to the low angle bank
+        lab_detector = move_info.detectors[DetectorType.to_string(DetectorType.LAB)]
+        SANSMoveSANS2D.perform_x_and_y_tilts(workspace, lab_detector)
+
+        lab_detector_default_sd_m = move_info.lab_detector_default_sd_m
+        x_shift = -coordinates[0]
+        y_shift = -coordinates[1]
+
+        z_shift = (lab_detector_z + lab_detector.z_translation_correction) - lab_detector_default_sd_m
+        detector_name = lab_detector.detector_name
+        offset = {CanonicalCoordinates.X: x_shift,
+                  CanonicalCoordinates.Y: y_shift,
+                  CanonicalCoordinates.Z: z_shift}
+        move_component(workspace, offset, detector_name)
+
+    @staticmethod
+    def _move_monitor_4(workspace, move_info):
+        if move_info.monitor_4_offset != 0.0:
+            monitor_4_name = move_info.monitor_names["4"]
+            instrument = workspace.getInstrument()
+            monitor_4 = instrument.getComponentByName(monitor_4_name)
+
+            # Get position of monitor 4
+            monitor_position = monitor_4.getPos()
+            z_position_monitor = monitor_position.getZ()
+
+            # The location is relative to the rear-detector, get this position
+            lab_detector = move_info.detectors[DetectorType.to_string(DetectorType.LAB)]
+            detector_name = lab_detector.detector_name
+            lab_detector_component = instrument.getComponentByName(detector_name)
+            detector_position = lab_detector_component.getPos()
+            z_position_detector = detector_position.getZ()
+
+            monitor_4_offset = move_info.monitor_4_offset / 1000.
+            z_new = z_position_detector + monitor_4_offset
+            z_move = z_new - z_position_monitor
+            offset = {CanonicalCoordinates.X: z_move}
+            move_component(workspace, offset, monitor_4_name)
+
+    def do_move_initial(self, move_info, workspace, coordinates, component):
+        # For SANS2D we expect exactly two beam-centre coordinates (x and y)
+        assert len(coordinates) == 2
+
+        _component = component  # noqa
+
+        # Move the high angle bank
+        self._move_high_angle_bank(move_info, workspace, coordinates)
+
+        # Move the low angle bank
+        self._move_low_angle_bank(move_info, workspace, coordinates)
+
+        # Move the sample holder
+        move_sample_holder(workspace, move_info.sample_offset, move_info.sample_offset_direction)
+
+        # Move monitor 4
+        self._move_monitor_4(workspace, move_info)
+
+    def do_move_with_elementary_displacement(self, move_info, workspace, coordinates, component):
+        # For SANS2D we expect exactly two coordinates; the shift is applied in the opposite direction
+        assert len(coordinates) == 2
+        coordinates_to_move = [-coordinates[0], -coordinates[1]]
+        apply_standard_displacement(move_info, workspace, coordinates_to_move, component)
+
+    def do_set_to_zero(self, move_info, workspace, component):
+        set_components_to_original_for_isis(move_info, workspace, component)
+
+    @staticmethod
+    def is_correct(instrument_type, run_number, **kwargs):
+        return True if instrument_type is SANSInstrument.SANS2D else False
+
+
+class SANSMoveLOQ(SANSMove):
+    def __init__(self):
+        super(SANSMoveLOQ, self).__init__()
+
+    def do_move_initial(self, move_info, workspace, coordinates, component):
+        # For LOQ we expect exactly two beam-centre coordinates (x and y)
+        assert len(coordinates) == 2
+        # First move the sample holder
+        move_sample_holder(workspace, move_info.sample_offset, move_info.sample_offset_direction)
+
+        x = coordinates[0]
+        y = coordinates[1]
+        center_position = move_info.center_position
+
+        x_shift = center_position - x
+        y_shift = center_position - y
+
+        # Get the detector name
+        component_name = move_info.detectors[component].detector_name
+
+        # Shift the detector by the input amount
+        offset = {CanonicalCoordinates.X: x_shift,
+                  CanonicalCoordinates.Y: y_shift}
+        move_component(workspace, offset, component_name)
+
+        # Shift the detector according to the corrections of the detector under investigation
+        offset_from_corrections = {CanonicalCoordinates.X: move_info.detectors[component].x_translation_correction,
+                                   CanonicalCoordinates.Y: move_info.detectors[component].y_translation_correction,
+                                   CanonicalCoordinates.Z: move_info.detectors[component].z_translation_correction}
+        move_component(workspace, offset_from_corrections, component_name)
+
+    def do_move_with_elementary_displacement(self, move_info, workspace, coordinates, component):
+        # For LOQ we expect exactly two coordinates; the shift is applied in the opposite direction
+        assert len(coordinates) == 2
+        coordinates_to_move = [-coordinates[0], -coordinates[1]]
+        apply_standard_displacement(move_info, workspace, coordinates_to_move, component)
+
+    def do_set_to_zero(self, move_info, workspace, component):
+        set_components_to_original_for_isis(move_info, workspace, component)
+
+    @staticmethod
+    def is_correct(instrument_type, run_number, **kwargs):
+        return True if instrument_type is SANSInstrument.LOQ else False
+
+
+class SANSMoveLARMOROldStyle(SANSMove):
+    def __init__(self):
+        super(SANSMoveLARMOROldStyle, self).__init__()
+
+    def do_move_initial(self, move_info, workspace, coordinates, component):
+        # For LARMOR we expect exactly two beam-centre coordinates
+        assert len(coordinates) == 2
+
+        # Move the sample holder
+        move_sample_holder(workspace, move_info.sample_offset, move_info.sample_offset_direction)
+
+        # Shift the low-angle bank detector in the y direction
+        y_shift = -coordinates[1]
+        coordinates_for_only_y = [0.0, y_shift]
+        apply_standard_displacement(move_info, workspace, coordinates_for_only_y,
+                                    DetectorType.to_string(DetectorType.LAB))
+
+        # Shift the low-angle bank detector in the x direction
+        x_shift = -coordinates[0]
+        coordinates_for_only_x = [x_shift, 0.0]
+        apply_standard_displacement(move_info, workspace, coordinates_for_only_x,
+                                    DetectorType.to_string(DetectorType.LAB))
+
+    def do_move_with_elementary_displacement(self, move_info, workspace, coordinates, component):
+        # For LARMOR we expect exactly two coordinates
+        assert len(coordinates) == 2
+
+        # Shift component along the y direction
+        # (the applied shift is the negative of the beam centre coordinate)
+        y_shift = -coordinates[1]
+        coordinates_for_only_y = [0.0, y_shift]
+        apply_standard_displacement(move_info, workspace, coordinates_for_only_y, component)
+
+        # Shift component along the x direction
+        x_shift = -coordinates[0]
+        coordinates_for_only_x = [x_shift, 0.0]
+        apply_standard_displacement(move_info, workspace, coordinates_for_only_x, component)
+
+    def do_set_to_zero(self, move_info, workspace, component):
+        set_components_to_original_for_isis(move_info, workspace, component)
+
+    @staticmethod
+    def is_correct(instrument_type, run_number, **kwargs):
+        is_correct_instrument = instrument_type is SANSInstrument.LARMOR
+        is_correct_run_number = run_number < 2217
+        return True if is_correct_instrument and is_correct_run_number else False
+
+
+class SANSMoveLARMORNewStyle(SANSMove):
+    def __init__(self):
+        super(SANSMoveLARMORNewStyle, self).__init__()
+
+    @staticmethod
+    def _rotate_around_y_axis(move_info, workspace, angle, component, bench_rotation):
+        detector = move_info.detectors[component]
+        detector_name = detector.detector_name
+        # Note that the angle definition for the bench in LARMOR and in Mantid seem to have a different handedness
+        total_angle = bench_rotation - angle
+        direction = {CanonicalCoordinates.X: 0.0,
+                     CanonicalCoordinates.Y: 1.0,
+                     CanonicalCoordinates.Z: 0.0}
+        rotate_component(workspace, total_angle, direction, detector_name)
+
+    def do_move_initial(self, move_info, workspace, coordinates, component):
+        # For LARMOR we expect exactly two coordinates
+        assert len(coordinates) == 2
+
+        # Move the sample holder
+        move_sample_holder(workspace, move_info.sample_offset, move_info.sample_offset_direction)
+
+        # Shift the low-angle bank detector in the y direction
+        y_shift = -coordinates[1]
+        coordinates_for_only_y = [0.0, y_shift]
+        apply_standard_displacement(move_info, workspace, coordinates_for_only_y,
+                                    DetectorType.to_string(DetectorType.LAB))
+
+        # The x shift of the low-angle bank is realized as a rotation of the bank about the y axis
+        angle = -coordinates[0]
+
+        bench_rot_tag = "Bench_Rot"
+        log_names = [bench_rot_tag]
+        log_types = [float]
+        log_values = get_single_valued_logs_from_workspace(workspace, log_names, log_types)
+        bench_rotation = move_info.bench_rotation \
+            if log_values[bench_rot_tag] is None else log_values[bench_rot_tag]
+
+        self._rotate_around_y_axis(move_info, workspace, angle,
+                                   DetectorType.to_string(DetectorType.LAB), bench_rotation)
+
+    def do_move_with_elementary_displacement(self, move_info, workspace, coordinates, component):
+        # For LARMOR we expect exactly two coordinates
+        assert len(coordinates) == 2
+
+        # Shift component along the y direction
+        # (the applied shift is the negative of the beam centre coordinate)
+        y_shift = -coordinates[1]
+        coordinates_for_only_y = [0.0, y_shift]
+        apply_standard_displacement(move_info, workspace, coordinates_for_only_y, component)
+
+        # Shift component along the x direction; note that we do not want to apply the bench rotation again
+        angle = coordinates[0]
+        self._rotate_around_y_axis(move_info, workspace, angle, component, 0.0)
+
+    def do_set_to_zero(self, move_info, workspace, component):
+        set_components_to_original_for_isis(move_info, workspace, component)
+
+    @staticmethod
+    def is_correct(instrument_type, run_number, **kwargs):
+        is_correct_instrument = instrument_type is SANSInstrument.LARMOR
+        is_correct_run_number = run_number >= 2217
+        return True if is_correct_instrument and is_correct_run_number else False
+
+
+class SANSMoveFactory(object):
+    def __init__(self):
+        super(SANSMoveFactory, self).__init__()
+
+    @staticmethod
+    def create_mover(workspace):
+        # Get selection
+        run_number = workspace.getRunNumber()
+        instrument = workspace.getInstrument()
+        instrument_type = SANSInstrument.from_string(instrument.getName())
+        if SANSMoveLOQ.is_correct(instrument_type, run_number):
+            mover = SANSMoveLOQ()
+        elif SANSMoveSANS2D.is_correct(instrument_type, run_number):
+            mover = SANSMoveSANS2D()
+        elif SANSMoveLARMOROldStyle.is_correct(instrument_type, run_number):
+            mover = SANSMoveLARMOROldStyle()
+        elif SANSMoveLARMORNewStyle.is_correct(instrument_type, run_number):
+            mover = SANSMoveLARMORNewStyle()
+        else:
+            mover = None
+            NotImplementedError("SANSLoaderFactory: Other instruments are not implemented yet.")
+        return mover
diff --git a/scripts/SANS/sans/common/constants.py b/scripts/SANS/sans/common/constants.py
index 86be890cd0ec46194ad50c5138ca391330bfc9e4..aaf756eb2d67f60867d4d6af379b17317ee26dd9 100644
--- a/scripts/SANS/sans/common/constants.py
+++ b/scripts/SANS/sans/common/constants.py
@@ -28,8 +28,8 @@ EMPTY_NAME = "dummy"
 SANS_SUFFIX = "sans"
 TRANS_SUFFIX = "trans"
 
-high_angle_bank = "HAB"
-low_angle_bank = "LAB"
+HIGH_ANGLE_BANK = "HAB"
+LOW_ANGLE_BANK = "LAB"
 
 SANS2D = "SANS2D"
 LARMOR = "LARMOR"
diff --git a/scripts/SANS/sans/common/enums.py b/scripts/SANS/sans/common/enums.py
index 302e710ce9a6a9f599e0ee7ba2a640adf222bcc2..dce908b8c4bfc75c8bb365d2cb5780b61ea39a37 100644
--- a/scripts/SANS/sans/common/enums.py
+++ b/scripts/SANS/sans/common/enums.py
@@ -299,3 +299,28 @@ def convert_int_to_shape(shape_int):
 @serializable_enum("ISISNexus", "ISISNexusAdded", "ISISRaw", "NoFileType")
 class FileType(object):
     pass
+
+
+# ---------------------------
+# OutputMode
+# ---------------------------
+@string_convertible
+@serializable_enum("PublishToADS", "SaveToFile", "Both")
+class OutputMode(object):
+    """
+    Defines the output modes of a batch reduction.
+    """
+    pass
+
+
+# ------------------------------
+# Entries of batch reduction file
+# -------------------------------
+@string_convertible
+@serializable_enum("SampleScatter", "SampleTransmission", "SampleDirect", "CanScatter", "CanTransmission", "CanDirect",
+                   "Output", "UserFile")
+class BatchReductionEntry(object):
+    """
+    Defines the entries of a batch reduction file.
+    """
+    pass
diff --git a/scripts/SANS/sans/common/file_information.py b/scripts/SANS/sans/common/file_information.py
index eeac9dfe467befe6bcd64dc1854120299d8c364d..a4b13f3404291fbbc5cf4f72ccfd07c104d43f5f 100644
--- a/scripts/SANS/sans/common/file_information.py
+++ b/scripts/SANS/sans/common/file_information.py
@@ -6,18 +6,56 @@ from __future__ import (absolute_import, division, print_function)
 import os
 import h5py as h5
 from abc import (ABCMeta, abstractmethod)
-
 from mantid.api import FileFinder
 from mantid.kernel import (DateAndTime, ConfigService)
 from mantid.api import (AlgorithmManager, ExperimentInfo)
 from sans.common.enums import (SANSInstrument, FileType)
-
+from sans.common.constants import (SANS2D, LARMOR, LOQ)
 from six import with_metaclass
 
 
-# -----------------------------------
-# Free Functions
-# -----------------------------------
+# ----------------------------------------------------------------------------------------------------------------------
+# Constants
+# ----------------------------------------------------------------------------------------------------------------------
+# File extensions
+NXS_EXTENSION = "nxs"
+RAW_EXTENSION = "raw"
+RAW_EXTENSION_WITH_DOT = ".RAW"
+
+ADDED_SUFFIX = "-add_added_event_data"
+ADDED_MONITOR_SUFFIX = "-add_monitors_added_event_data"
+ADD_FILE_SUFFIX = "-ADD.NXS"
+
+PARAMETERS_XML_SUFFIX = "_Parameters.xml"
+
+
+# Nexus key words
+RAW_DATA_1 = "raw_data_1"
+PERIODS = "periods"
+PROTON_CHARGE = "proton_charge"
+INSTRUMENT = "instrument"
+NAME = "name"
+START_TIME = "start_time"
+RUN_NUMBER = "run_number"
+NX_CLASS = "NX_class"
+NX_EVENT_DATA = "NXevent_data"
+LOGS = "logs"
+VALUE = "value"
+WORKSPACE_NAME = "workspace_name"
+END_TIME = "r_endtime"
+END_DATE = "r_enddate"
+MANTID_WORKSPACE_PREFIX = 'mantid_workspace_'
+EVENT_WORKSPACE = "event_workspace"
+
+# Other
+ALTERNATIVE_SANS2D_NAME = "SAN"
+DEFINITION = "Definition"
+PARAMETERS = "Parameters"
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# General functions
+# ----------------------------------------------------------------------------------------------------------------------
 def find_full_file_path(file_name):
     """
     Gets the full path of a file name if it is available on the Mantid paths.
@@ -56,9 +94,9 @@ def get_extension_for_file_type(file_info):
     :return: the extension a stirng. This can be either nxs or raw.
     """
     if file_info.get_type() is FileType.ISISNexus or file_info.get_type() is FileType.ISISNexusAdded:
-        extension = "nxs"
+        extension = NXS_EXTENSION
     elif file_info.get_type() is FileType.ISISRaw:
-        extension = "raw"
+        extension = RAW_EXTENSION
     else:
         raise RuntimeError("The file extension type for a file of type {0} is unknown"
                            "".format(str(file_info.get_type())))
@@ -114,13 +152,13 @@ def get_instrument_paths_for_sans_file(file_name):
     def get_ipf_equivalent_name(path):
         # If XXX_Definition_Yyy.xml is the IDF name, then the equivalent  IPF name is: XXX_Parameters_Yyy.xml
         base_file_name = os.path.basename(path)
-        return base_file_name.replace("Definition", "Parameters")
+        return base_file_name.replace(DEFINITION, PARAMETERS)
 
     def get_ipf_standard_name(path):
         # If XXX_Definition_Yyy.xml is the IDF name, then the standard IPF name is: XXX_Parameters.xml
         base_file_name = os.path.basename(path)
         elements = base_file_name.split("_")
-        return elements[0] + "_Parameters.xml"
+        return elements[0] + PARAMETERS_XML_SUFFIX
 
     def check_for_files(directory, path):
         # Check if XXX_Parameters_Yyy.xml exists in the same folder
@@ -189,9 +227,9 @@ def get_instrument_paths_for_sans_file(file_name):
                        "available for {0}".format(str(idf_path)))
 
 
-# ----------------------------------------------
-# Methods for ISIS Nexus
-# ---------------------------------------------
+# ----------------------------------------------------------------------------------------------------------------------
+# Functions for ISIS Nexus
+# ----------------------------------------------------------------------------------------------------------------------
 def get_isis_nexus_info(file_name):
     """
     Get information if is ISIS Nexus and the number of periods.
@@ -202,11 +240,14 @@ def get_isis_nexus_info(file_name):
     try:
         with h5.File(file_name) as h5_file:
             keys = list(h5_file.keys())
-            is_isis_nexus = "raw_data_1" in keys
-            first_entry = h5_file["raw_data_1"]
-            period_group = first_entry["periods"]
-            proton_charge_data_set = period_group["proton_charge"]
-            number_of_periods = len(proton_charge_data_set)
+            is_isis_nexus = RAW_DATA_1 in keys
+            if is_isis_nexus:
+                first_entry = h5_file[RAW_DATA_1]
+                period_group = first_entry[PERIODS]
+                proton_charge_data_set = period_group[PROTON_CHARGE]
+                number_of_periods = len(proton_charge_data_set)
+            else:
+                number_of_periods = -1
     except IOError:
         is_isis_nexus = False
         number_of_periods = -1
@@ -238,11 +279,11 @@ def get_instrument_name_for_isis_nexus(file_name):
         keys = list(h5_file.keys())
         first_entry = h5_file[keys[0]]
         # Open instrument group
-        instrument_group = first_entry["instrument"]
+        instrument_group = first_entry[INSTRUMENT]
         # Open name data set
-        name_data_set = instrument_group["name"]
+        name_data_set = instrument_group[NAME]
         # Read value
-        instrument_name = name_data_set[0]
+        instrument_name = name_data_set[0].decode("utf-8")
     return instrument_name
 
 
@@ -264,12 +305,12 @@ def get_top_level_nexus_entry(file_name, entry_name):
 
 
 def get_date_for_isis_nexus(file_name):
-    value = get_top_level_nexus_entry(file_name, "start_time")
+    value = get_top_level_nexus_entry(file_name, START_TIME)
     return DateAndTime(value)
 
 
 def get_run_number_for_isis_nexus(file_name):
-    return int(get_top_level_nexus_entry(file_name, "run_number"))
+    return int(get_top_level_nexus_entry(file_name, RUN_NUMBER))
 
 
 def get_event_mode_information(file_name):
@@ -287,30 +328,195 @@ def get_event_mode_information(file_name):
         # Open instrument group
         is_event_mode = False
         for value in list(first_entry.values()):
-            if "NX_class" in value.attrs and "NXevent_data" == value.attrs["NX_class"]:
+            if NX_CLASS in value.attrs and NX_EVENT_DATA == value.attrs[NX_CLASS].decode("utf-8"):
                 is_event_mode = True
                 break
     return is_event_mode
 
 
-# ---------
+# ----------------------------------------------------------------------------------------------------------------------
+# Functions for Added data
+# ----------------------------------------------------------------------------------------------------------------------
+# 1. An added file will always have a file name SOME_NAME-add.nxs, ie we can preselect an added file by the -add
+#    qualifier
+# 2. Scenario 1: Added histogram data, ie files which were added and saved as histogram data.
+# 2.1 Added histogram files will contain one or more (if they were based on multi-period files) entries in the hdf5
+#    file where the first level entry will be named mantid_workspace_X where X=1,2,3,... . Note that the numbers
+#    correspond  to periods.
+# 3. Scenario 2: Added event data, ie files which were added and saved as event data.
+
+def get_date_and_run_number_added_nexus(file_name):
+    with h5.File(file_name) as h5_file:
+        keys = list(h5_file.keys())
+        first_entry = h5_file[keys[0]]
+        logs = first_entry[LOGS]
+        # Read the start time from the logs and convert to DateAndTime
+        start_time = logs[START_TIME]
+        start_time_value = DateAndTime(start_time[VALUE][0])
+        # Read the run number from the logs and convert to an integer
+        run_number = logs[RUN_NUMBER]
+        run_number_value = int(run_number[VALUE][0])
+    return start_time_value, run_number_value
+
+
+def get_added_nexus_information(file_name):  # noqa
+    """
+    Get information if is added data and the number of periods.
+
+    :param file_name: the full file path.
+    :return: whether the file is added data, the number of periods, and whether it contains event data.
+    """
+    def get_all_keys_for_top_level(key_collection):
+        top_level_key_collection = []
+        for key in key_collection:
+            if key.startswith(MANTID_WORKSPACE_PREFIX):
+                top_level_key_collection.append(key)
+        return sorted(top_level_key_collection)
+
+    def check_if_event_mode(entry):
+        return EVENT_WORKSPACE in list(entry.keys())
+
+    def get_workspace_name(entry):
+        return entry[WORKSPACE_NAME][0].decode("utf-8")
+
+    def has_same_number_of_entries(workspace_names, monitor_workspace_names):
+        return len(workspace_names) == len(monitor_workspace_names)
+
+    def has_added_tag(workspace_names, monitor_workspace_names):
+        # Check data
+        all_have_added_tag = all([ADDED_SUFFIX in ws_name for ws_name in workspace_names])
+        if all_have_added_tag:
+            # Check monitors
+            all_have_added_tag = all([ADDED_MONITOR_SUFFIX in ws_name for ws_name in monitor_workspace_names])
+        return all_have_added_tag
+
+    def entries_match(workspace_names, monitor_workspace_names):
+        altered_names = [ws_name.replace(ADDED_SUFFIX, ADDED_MONITOR_SUFFIX) for ws_name in workspace_names]
+        return all([ws_name in monitor_workspace_names for ws_name in altered_names])
+
+    def get_added_event_info(h5_file_handle, key_collection):
+        """
+        We expect to find one event workspace and one histogram (monitor) workspace per period
+        """
+        workspace_names = []
+        monitor_workspace_names = []
+        for key in key_collection:
+            entry = h5_file_handle[key]
+            is_event_mode = check_if_event_mode(entry)
+            workspace_name = get_workspace_name(entry)
+            if is_event_mode:
+                workspace_names.append(workspace_name)
+            else:
+                monitor_workspace_names.append(workspace_name)
+
+        # There are several criteria which need to be fulfilled to be sure that we are dealing with added event data
+        # 1. There have to be the same number of event and non-event entries, since for each data set we have a
+        #    monitor data set.
+        # 2. Every data entry needs to have "-add_added_event_data" in the workspace name and every
+        #    monitor data entry needs to have "-add_monitors_added_event_data" in the workspace name.
+        # 3. Every data entry has a matching monitor entry, e.g. random_name-add_added_event_data_4 needs
+        #    random_name-add_monitors_added_event_data_4.
+        if (has_same_number_of_entries(workspace_names, monitor_workspace_names) and
+            has_added_tag(workspace_names, monitor_workspace_names) and
+                entries_match(workspace_names, monitor_workspace_names)):
+            is_added_file_event = True
+            num_periods = len(workspace_names)
+        else:
+            is_added_file_event = False
+            num_periods = 1
+
+        return is_added_file_event, num_periods
+
+    def get_added_histogram_info(h5_file_handle, key_collection):
+        # We only have to make sure that all entries are non-event type
+        is_added_file_histogram = True
+        num_periods = len(key_collection)
+        for key in key_collection:
+            entry = h5_file_handle[key]
+            if check_if_event_mode(entry):
+                is_added_file_histogram = False
+                num_periods = 1
+                break
+        return is_added_file_histogram, num_periods
+
+    if has_added_suffix(file_name):
+        try:
+            with h5.File(file_name) as h5_file:
+                # Get all mantid_workspace_X keys
+                keys = list(h5_file.keys())
+                top_level_keys = get_all_keys_for_top_level(keys)
+
+                # Check if entries are added event data; if we don't have a hit, then it can still be
+                # added histogram data
+                is_added_event_file, number_of_periods_event = get_added_event_info(h5_file, top_level_keys)
+                is_added_histogram_file, number_of_periods_histogram = get_added_histogram_info(h5_file, top_level_keys)
+
+                if is_added_event_file:
+                    is_added = True
+                    is_event = True
+                    number_of_periods = number_of_periods_event
+                elif is_added_histogram_file:
+                    is_added = True
+                    is_event = False
+                    number_of_periods = number_of_periods_histogram
+                else:
+                    is_added = True
+                    is_event = False
+                    number_of_periods = 1
+        except IOError:
+            is_added = False
+            is_event = False
+            number_of_periods = 1
+    else:
+        is_added = False
+        is_event = False
+        number_of_periods = 1
+    return is_added, number_of_periods, is_event
+
+
+def get_date_for_added_workspace(file_name):
+    value = get_top_level_nexus_entry(file_name, START_TIME)
+    return DateAndTime(value)
+
+
+def has_added_suffix(file_name):
+    return file_name.upper().endswith(ADD_FILE_SUFFIX)
+
+
+def is_added_histogram(file_name):
+    is_added, _, is_event = get_added_nexus_information(file_name)
+    return is_added and not is_event
+
+
+def is_added_event(file_name):
+    is_added, _, is_event = get_added_nexus_information(file_name)
+    return is_added and is_event
+
+
+# ----------------------------------------------------------------------------------------------------------------------
 # ISIS Raw
-# ---------
+# ----------------------------------------------------------------------------------------------------------------------
 def get_raw_info(file_name):
-    try:
-        alg_info = AlgorithmManager.createUnmanaged("RawFileInfo")
-        alg_info.initialize()
-        alg_info.setChild(True)
-        alg_info.setProperty("Filename", file_name)
-        alg_info.setProperty("GetRunParameters", True)
-        alg_info.execute()
-
-        periods = alg_info.getProperty("PeriodCount").value
-        is_raw = True
-        number_of_periods = periods
-    except IOError:
+    # Preselect files which don't end with .raw
+    split_file_name, file_extension = os.path.splitext(file_name)
+    if file_extension.upper() != RAW_EXTENSION_WITH_DOT:
         is_raw = False
         number_of_periods = -1
+    else:
+        try:
+            alg_info = AlgorithmManager.createUnmanaged("RawFileInfo")
+            alg_info.initialize()
+            alg_info.setChild(True)
+            alg_info.setProperty("Filename", file_name)
+            alg_info.setProperty("GetRunParameters", True)
+            alg_info.execute()
+
+            periods = alg_info.getProperty("PeriodCount").value
+            is_raw = True
+            number_of_periods = periods
+        except IOError:
+            is_raw = False
+            number_of_periods = -1
 
     return is_raw, number_of_periods
 
@@ -337,7 +543,7 @@ def get_from_raw_header(file_name, index):
 
 
 def instrument_name_correction(instrument_name):
-    return "SANS2D" if instrument_name == "SAN" else instrument_name
+    return SANS2D if instrument_name == ALTERNATIVE_SANS2D_NAME else instrument_name
 
 
 def get_instrument_name_for_raw(file_name):
@@ -384,8 +590,8 @@ def get_date_for_raw(file_name):
 
     keys = run_parameters.getColumnNames()
 
-    time_id = "r_endtime"
-    date_id = "r_enddate"
+    time_id = END_TIME
+    date_id = END_DATE
 
     time = run_parameters.column(keys.index(time_id))
     date = run_parameters.column(keys.index(date_id))
@@ -394,9 +600,22 @@ def get_date_for_raw(file_name):
     return get_raw_measurement_time(date, time)
 
 
-# -----------------------------------------------
+def get_instrument(instrument_name):
+    instrument_name = instrument_name.upper()
+    if instrument_name == SANS2D:
+        instrument = SANSInstrument.SANS2D
+    elif instrument_name == LARMOR:
+        instrument = SANSInstrument.LARMOR
+    elif instrument_name == LOQ:
+        instrument = SANSInstrument.LOQ
+    else:
+        instrument = SANSInstrument.NoInstrument
+    return instrument
+
+
+# ----------------------------------------------------------------------------------------------------------------------
 # SANS file Information
-# -----------------------------------------------
+# ----------------------------------------------------------------------------------------------------------------------
 class SANSFileInformation(with_metaclass(ABCMeta, object)):
     def __init__(self, file_name):
         self._file_name = file_name
@@ -425,6 +644,14 @@ class SANSFileInformation(with_metaclass(ABCMeta, object)):
     def get_run_number(self):
         pass
 
+    @abstractmethod
+    def is_event_mode(self):
+        pass
+
+    @abstractmethod
+    def is_added_data(self):
+        pass
+
     @staticmethod
     def get_full_file_name(file_name):
         return find_sans_file(file_name)
@@ -471,6 +698,50 @@ class SANSFileInformationISISNexus(SANSFileInformation):
     def is_event_mode(self):
         return self._is_event_mode
 
+    def is_added_data(self):
+        return False
+
+
+class SANSFileInformationISISAdded(SANSFileInformation):
+    def __init__(self, file_name):
+        super(SANSFileInformationISISAdded, self).__init__(file_name)
+        # Setup instrument name
+        self._full_file_name = SANSFileInformation.get_full_file_name(self._file_name)
+        instrument_name = get_instrument_name_for_isis_nexus(self._full_file_name)
+        self._instrument_name = get_instrument(instrument_name)
+
+        date, run_number = get_date_and_run_number_added_nexus(self._full_file_name)
+        self._date = date
+        self._run_number = run_number
+
+        _,  number_of_periods, is_event = get_added_nexus_information(self._full_file_name)
+        self._number_of_periods = number_of_periods
+        self._is_event_mode = is_event
+
+    def get_file_name(self):
+        return self._full_file_name
+
+    def get_instrument(self):
+        return self._instrument_name
+
+    def get_date(self):
+        return self._date
+
+    def get_number_of_periods(self):
+        return self._number_of_periods
+
+    def get_run_number(self):
+        return self._run_number
+
+    def get_type(self):
+        return FileType.ISISNexusAdded
+
+    def is_event_mode(self):
+        return self._is_event_mode
+
+    def is_added_data(self):
+        return True
+
 
 class SANSFileInformationRaw(SANSFileInformation):
     def __init__(self, file_name):
@@ -507,6 +778,12 @@ class SANSFileInformationRaw(SANSFileInformation):
     def get_type(self):
         return FileType.ISISRaw
 
+    def is_event_mode(self):
+        return False
+
+    def is_added_data(self):
+        return False
+
 
 class SANSFileInformationFactory(object):
     def __init__(self):
@@ -518,7 +795,8 @@ class SANSFileInformationFactory(object):
             file_information = SANSFileInformationISISNexus(full_file_name)
         elif is_raw_single_period(full_file_name) or is_raw_multi_period(full_file_name):
             file_information = SANSFileInformationRaw(full_file_name)
-        # TODO: ADD added nexus files here
+        elif is_added_histogram(full_file_name) or is_added_event(full_file_name):
+            file_information = SANSFileInformationISISAdded(full_file_name)
         else:
             raise NotImplementedError("The file type you have provided is not implemented yet.")
         return file_information
diff --git a/scripts/SANS/sans/common/general_functions.py b/scripts/SANS/sans/common/general_functions.py
index d1c3cd467dbb58c1aec243bbd452ce4508c89a83..e88272161f72955433245cd7c9817c35c08b3a9f 100644
--- a/scripts/SANS/sans/common/general_functions.py
+++ b/scripts/SANS/sans/common/general_functions.py
@@ -4,10 +4,12 @@
 
 from __future__ import (absolute_import, division, print_function)
 from math import (acos, sqrt, degrees)
+import re
 from mantid.api import AlgorithmManager, AnalysisDataService
 from mantid.kernel import (DateAndTime)
 from sans.common.constants import SANS_FILE_TAG
 from sans.common.log_tagger import (get_tag, has_tag, set_tag)
+from sans.common.enums import (DetectorType, RangeStepType)
 
 
 # -------------------------------------------
@@ -75,7 +77,7 @@ def get_single_valued_logs_from_workspace(workspace, log_names, log_types, conve
         log_value = get_log_value(run, log_name, log_type)
         log_results.update({log_name: log_value})
     if convert_from_millimeter_to_meter:
-        for key in log_results.keys():
+        for key in list(log_results.keys()):
             log_results[key] /= 1000.
     return log_results
 
@@ -91,7 +93,7 @@ def create_unmanaged_algorithm(name, **kwargs):
     alg = AlgorithmManager.createUnmanaged(name)
     alg.initialize()
     alg.setChild(True)
-    for key, value in kwargs.items():
+    for key, value in list(kwargs.items()):
         alg.setProperty(key, value)
     return alg
 
@@ -180,3 +182,199 @@ def get_ads_workspace_references():
     """
     for workspace_name in AnalysisDataService.getObjectNames():
         yield AnalysisDataService.retrieve(workspace_name)
+
+
+def convert_bank_name_to_detector_type_isis(detector_name):
+    """
+    Converts a detector name of an isis detector to a detector type.
+
+    The current translation is
+    SANS2D: rear-detector -> LAB
+            front-detector -> HAB
+            but also allowed rear, front
+    LOQ:    main-detector-bank -> LAB
+            HAB                -> HAB
+            but also allowed main
+    LARMOR: DetectorBench      -> LAB
+
+    @param detector_name: a string with a valid detector name
+    @return: a detector type depending on the input string; raises a RuntimeError for unknown detector names.
+    """
+    detector_name = detector_name.upper()
+    detector_name = detector_name.strip()
+    if detector_name == "REAR-DETECTOR" or detector_name == "MAIN-DETECTOR-BANK" or detector_name == "DETECTORBENCH" \
+            or detector_name == "REAR" or detector_name == "MAIN":
+        detector_type = DetectorType.LAB
+    elif detector_name == "FRONT-DETECTOR" or detector_name == "HAB" or detector_name == "FRONT":
+        detector_type = DetectorType.HAB
+    else:
+        raise RuntimeError("There is not detector type conversion for a detector with the "
+                           "name {0}".format(detector_name))
+    return detector_type
+
+
+def parse_event_slice_setting(string_to_parse):
+    """
+    Create a list of boundaries from a string defining the slices.
+    Valid syntax is:
+      * From 8 to 9 > '8-9' --> return [[8,9]]
+      * From 8 to 9 and from 10 to 12 > '8-9, 10-12' --> return [[8,9],[10,12]]
+      * From 5 to 10 in steps of 1 > '5:1:10' --> return [[5,6],[6,7],[7,8],[8,9],[9,10]]
+      * From 5 > '>5' --> return [[5, -1]]
+      * Till 5 > '<5' --> return [[-1,5]]
+
+    Any combination of these syntax separated by comma is valid.
+    A special mark is used to signal no limit: -1.
+    Also, for an empty string, it will return: None.
+
+    It does not accept negative values.
+    """
+
+    def _does_match(compiled_regex, line):
+        return compiled_regex.match(line) is not None
+
+    def _extract_simple_slice(line):
+        start, stop = line.split("-")
+        start = float(start)
+        stop = float(stop)
+        if start > stop:
+            raise ValueError("Parsing event slices. It appears that the start value {0} is larger than the stop "
+                             "value {1}. Make sure that this is not the case.")
+        return [start, stop]
+
+    def float_range(start, stop, step):
+        while start < stop:
+            yield start
+            start += step
+
+    def _extract_slice_range(line):
+        split_line = line.split(":")
+        start = float(split_line[0])
+        step = float(split_line[1])
+        stop = float(split_line[2])
+        if start > stop:
+            raise ValueError("Parsing event slices. It appears that the start value {0} is larger than the stop "
+                             "value {1}. Make sure that this is not the case.")
+
+        elements = list(float_range(start, stop, step))
+        # We are missing the last element
+        elements.append(stop)
+
+        # We generate ranges with [[element[0], element[1]], [element[1], element[2]], ...]
+        ranges = list(zip(elements[:-1], elements[1:]))
+        return [[e1, e2] for e1, e2 in ranges]
+
+    def _extract_full_range(line, range_marker_pattern):
+        is_lower_bound = ">" in line
+        line = re.sub(range_marker_pattern, "", line)
+        value = float(line)
+        if is_lower_bound:
+            return [value, -1]
+        else:
+            return [-1, value]
+
+    # Check if the input actually exists.
+    if not string_to_parse:
+        return None
+
+    number = r'(\d+(?:\.\d+)?(?:[eE][+-]\d+)?)'  # float without sign
+    simple_slice_pattern = re.compile("\\s*" + number + "\\s*" r'-' + "\\s*" + number + "\\s*")
+    slice_range_pattern = re.compile("\\s*" + number + "\\s*" + r':' + "\\s*" + number + "\\s*"
+                                     + r':' + "\\s*" + number)
+    full_range_pattern = re.compile("\\s*" + "(<|>)" + "\\s*" + number + "\\s*")
+
+    range_marker = re.compile("[><]")
+
+    slice_settings = string_to_parse.split(',')
+    all_ranges = []
+    for slice_setting in slice_settings:
+        slice_setting = slice_setting.replace(' ', '')
+        # We can have three scenarios
+        # 1. Simple Slice:     X-Y
+        # 2. Slice range :     X:Y:Z
+        # 3. Slice full range: >X or <X
+        if _does_match(simple_slice_pattern, slice_setting):
+            all_ranges.append(_extract_simple_slice(slice_setting))
+        elif _does_match(slice_range_pattern, slice_setting):
+            all_ranges.extend(_extract_slice_range(slice_setting))
+        elif _does_match(full_range_pattern, slice_setting):
+            all_ranges.append(_extract_full_range(slice_setting, range_marker))
+        else:
+            raise ValueError("The provided event slice configuration {0} cannot be parsed because "
+                             "of {1}".format(slice_settings, slice_setting))
+    return all_ranges
+
+
+def get_ranges_from_event_slice_setting(string_to_parse):
+    parsed_elements = parse_event_slice_setting(string_to_parse)
+    if not parsed_elements:
+        return
+    # We have the elements in the form [[a, b], [c, d], ...] but want [a, c, ...] and [b, d, ...]
+    lower = [element[0] for element in parsed_elements]
+    upper = [element[1] for element in parsed_elements]
+    return lower, upper
+
+
+def get_bins_for_rebin_setting(min_value, max_value, step_value, step_type):
+    """
+    Creates a list of bins for the rebin setting.
+
+    @param min_value: the minimum value
+    @param max_value: the maximum value
+    @param step_value: the step value
+    @param step_type: the step type, ie if linear or logarithmic
+    @return: a list of bin values
+    """
+    lower_bound = min_value
+    bins = []
+    while lower_bound < max_value:
+
+        bins.append(lower_bound)
+        # We can either have linear or logarithmic steps. The logarithmic step depends on the lower bound.
+        if step_type is RangeStepType.Lin:
+            step = step_value
+        else:
+            step = lower_bound*step_value
+
+        # Check if the step will bring us out of bounds. If so, then set the new upper value to the max_value
+        upper_bound = lower_bound + step
+        upper_bound = upper_bound if upper_bound < max_value else max_value
+
+        # Now we advance the lower bound
+        lower_bound = upper_bound
+    # Add the last lower_bound
+    bins.append(lower_bound)
+    return bins
+
+
+def get_range_lists_from_bin_list(bin_list):
+    return bin_list[:-1], bin_list[1:]
+
+
+def get_ranges_for_rebin_setting(min_value, max_value, step_value, step_type):
+    """
+    Creates two lists of lower and upper bounds for the rebin setting.
+
+    @param min_value: the minimum value
+    @param max_value: the maximum value
+    @param step_value: the step value
+    @param step_type: the step type, ie if linear or logarithmic
+    @return: two ranges lists, one for the lower and one for the upper bounds.
+    """
+    bins = get_bins_for_rebin_setting(min_value, max_value, step_value, step_type)
+    return get_range_lists_from_bin_list(bins)
+
+
+def get_ranges_for_rebin_array(rebin_array):
+    """
+    Converts a rebin array (min, step, max) into lower and upper bound range lists.
+
+    @param rebin_array: a simple rebin array, ie min, step, max
+    @return: two ranges lists, one for the lower and one for the upper bounds.
+    """
+    min_value = rebin_array[0]
+    step_value = rebin_array[1]
+    max_value = rebin_array[2]
+    step_type = RangeStepType.Lin if step_value >= 0. else RangeStepType.Log
+    step_value = abs(step_value)
+    return get_ranges_for_rebin_setting(min_value, max_value, step_value, step_type)
diff --git a/scripts/SANS/sans/common/xml_parsing.py b/scripts/SANS/sans/common/xml_parsing.py
index 13e576f6598e855bd67f564f71f2b4fe1d80f581..ea80a02eaec2050c1978422adf6f8464891604f1 100644
--- a/scripts/SANS/sans/common/xml_parsing.py
+++ b/scripts/SANS/sans/common/xml_parsing.py
@@ -8,6 +8,7 @@ try:
     import xml.etree.cElementTree as eTree
 except ImportError:
     import xml.etree.ElementTree as eTree
+from mantid.kernel import DateAndTime
 
 
 def get_named_elements_from_ipf_file(ipf_file, names_to_search, value_type):
@@ -76,3 +77,9 @@ def get_monitor_names_from_idf_file(idf_file):
                 else:
                     continue
     return output
+
+
+def get_valid_to_time_from_idf_string(idf_string):
+    tree_root = eTree.fromstring(idf_string)
+    valid_to_date = tree_root.attrib["valid-to"]
+    return DateAndTime(valid_to_date)
diff --git a/scripts/SANS/sans/state/compatibility.py b/scripts/SANS/sans/state/compatibility.py
new file mode 100644
index 0000000000000000000000000000000000000000..1048c60f150a39a4078d419457831809f6a96e15
--- /dev/null
+++ b/scripts/SANS/sans/state/compatibility.py
@@ -0,0 +1,52 @@
+# pylint: disable=too-few-public-methods
+
+"""State which governs the SANS compatibility mode. This is not part of the reduction itself and should be removed
+   once the transition to the new reducer is satisfactory and complete. This feature allows users to have the
+   two reduction approaches produce the exact same results. If the results are different then that is a hint
+   that we are dealing with a bug
+"""
+
+import copy
+from sans.state.state_base import (StateBase, rename_descriptor_names, BoolParameter, StringParameter)
+from sans.state.automatic_setters import (automatic_setters)
+from sans.common.enums import SANSInstrument
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# State
+# ----------------------------------------------------------------------------------------------------------------------
+@rename_descriptor_names
+class StateCompatibility(StateBase):
+    use_compatibility_mode = BoolParameter()
+    time_rebin_string = StringParameter()
+
+    def __init__(self):
+        super(StateCompatibility, self).__init__()
+        self.use_compatibility_mode = False
+        self.time_rebin_string = ""
+
+    def validate(self):
+        pass
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Builder
+# ----------------------------------------------------------------------------------------------------------------------
+class StateCompatibilityBuilder(object):
+    @automatic_setters(StateCompatibility)
+    def __init__(self):
+        super(StateCompatibilityBuilder, self).__init__()
+        self.state = StateCompatibility()
+
+    def build(self):
+        self.state.validate()
+        return copy.copy(self.state)
+
+
+def get_compatibility_builder(data_info):
+    instrument = data_info.instrument
+    if instrument is SANSInstrument.LARMOR or instrument is SANSInstrument.LOQ or instrument is SANSInstrument.SANS2D:
+        return StateCompatibilityBuilder()
+    else:
+        raise NotImplementedError("StateCompatibilityBuilder: Could not find any valid compatibility builder for the "
+                                  "specified StateData object {0}".format(str(data_info)))
diff --git a/scripts/SANS/sans/state/mask.py b/scripts/SANS/sans/state/mask.py
index f0385d9dc980586137273fa2b4699a7b881b54d4..70a3cc56f507f3f48c701237e21019a7c3fb3bd5 100644
--- a/scripts/SANS/sans/state/mask.py
+++ b/scripts/SANS/sans/state/mask.py
@@ -217,7 +217,7 @@ class StateMask(StateBase):
                 if not find_full_file_path(mask_file):
                     entry = validation_message("Mask file not found.",
                                                "Makes sure that the mask file is in your path",
-                                               {"mask_file": self.mask_file})
+                                               {"mask_file": self.mask_files})
                     is_invalid.update(entry)
 
         # --------------------
diff --git a/scripts/SANS/sans/state/state.py b/scripts/SANS/sans/state/state.py
index 5d596f49cb5c555b6ff376be6c26e60b6d1aeedd..e186dd215e24e991df237e2ad2658a88a3643c3a 100644
--- a/scripts/SANS/sans/state/state.py
+++ b/scripts/SANS/sans/state/state.py
@@ -22,6 +22,10 @@ from sans.state.scale import StateScale
 from sans.state.convert_to_q import StateConvertToQ
 from sans.state.automatic_setters import (automatic_setters)
 
+# Note that the compatibility state is not part of the new reduction chain, but allows us to accurately compare
+# results obtained via the old and new reduction chains.
+from sans.state.compatibility import (StateCompatibility, get_compatibility_builder)
+
 
 # ----------------------------------------------------------------------------------------------------------------------
 # State
@@ -38,6 +42,7 @@ class State(StateBase):
     scale = TypedParameter(StateScale, validator_sub_state)
     adjustment = TypedParameter(StateAdjustment, validator_sub_state)
     convert_to_q = TypedParameter(StateConvertToQ, validator_sub_state)
+    compatibility = TypedParameter(StateCompatibility, validator_sub_state)
 
     def __init__(self):
         super(State, self).__init__()
@@ -67,6 +72,11 @@ class State(StateBase):
         if not self.convert_to_q:
             is_invalid.update("State: The state object needs to include a StateConvertToQ object.")
 
+        # We don't enforce a compatibility mode, we just create one if it does not exist
+        if not self.compatibility:
+            if self.data:
+                self.compatibility = get_compatibility_builder(self.data).build()
+
         if is_invalid:
             raise ValueError("State: There is an issue with your in put. See: {0}".format(json.dumps(is_invalid)))
 
diff --git a/scripts/SANS/sans/state/state_functions.py b/scripts/SANS/sans/state/state_functions.py
index c68e61498500a35f1a7239db44c4e602374b0309..c109752650e6a9d63f1a8c2c9f0d360dbf41497a 100644
--- a/scripts/SANS/sans/state/state_functions.py
+++ b/scripts/SANS/sans/state/state_functions.py
@@ -79,7 +79,7 @@ def get_output_workspace_name(state, reduction_mode):
 
     # 5. Wavelength range
     wavelength = state.wavelength
-    wavelength_range_string = str(wavelength.wavelength_low) + "_" + str(wavelength.wavelength_high)
+    wavelength_range_string = "_" + str(wavelength.wavelength_low) + "_" + str(wavelength.wavelength_high)
 
     # 6. Phi Limits
     mask = state.mask
diff --git a/scripts/SANS/sans/test_helper/__init__.py b/scripts/SANS/sans/test_helper/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/scripts/test/SANS/state/test_director.py b/scripts/SANS/sans/test_helper/test_director.py
similarity index 100%
rename from scripts/test/SANS/state/test_director.py
rename to scripts/SANS/sans/test_helper/test_director.py
diff --git a/scripts/SANS/sans/user_file/__init__.py b/scripts/SANS/sans/user_file/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/scripts/SANS/sans/user_file/user_file_common.py b/scripts/SANS/sans/user_file/user_file_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b0f9a58655d315d1f080baa7677476c292df793
--- /dev/null
+++ b/scripts/SANS/sans/user_file/user_file_common.py
@@ -0,0 +1,136 @@
+from collections import namedtuple
+from sans.common.enums import serializable_enum
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+#  Named tuples for passing around data in a structured way, a bit like a plain old c-struct.
+# ----------------------------------------------------------------------------------------------------------------------
+# General
+range_entry = namedtuple('range_entry', 'start, stop')
+range_entry_with_detector = namedtuple('range_entry_with_detector', 'start, stop, detector_type')
+single_entry_with_detector = namedtuple('range_entry_with_detector', 'entry, detector_type')
+
+# Back
+back_single_monitor_entry = namedtuple('back_single_monitor_entry', 'monitor, start, stop')
+
+# Limits
+mask_angle_entry = namedtuple('mask_angle_entry', 'min, max, use_mirror')
+simple_range = namedtuple('simple_range', 'start, stop, step, step_type')
+complex_range = namedtuple('complex_steps', 'start, step1, mid, step2, stop, step_type1, step_type2')
+rebin_string_values = namedtuple('rebin_string_values', 'value')
+event_binning_string_values = namedtuple('event_binning_string_values', 'value')
+
+# Mask
+mask_line = namedtuple('mask_line', 'width, angle, x, y')
+mask_block = namedtuple('mask_block', 'horizontal1, horizontal2, vertical1, vertical2, detector_type')
+mask_block_cross = namedtuple('mask_block_cross', 'horizontal, vertical, detector_type')
+
+# Set
+position_entry = namedtuple('position_entry', 'pos1, pos2, detector_type')
+set_scales_entry = namedtuple('set_scales_entry', 's, a, b, c, d')
+
+# Fit
+range_entry_fit = namedtuple('range_entry_fit', 'start, stop, fit_type')
+fit_general = namedtuple('fit_general', 'start, stop, fit_type, data_type, polynomial_order')
+
+# Mon
+monitor_length = namedtuple('monitor_length', 'length, spectrum, interpolate')
+monitor_spectrum = namedtuple('monitor_spectrum', 'spectrum, is_trans, interpolate')
+monitor_file = namedtuple('monitor_file', 'file_path, detector_type')
+
+# Det
+det_fit_range = namedtuple('det_fit_range', 'start, stop, use_fit')
+
+# ------------------------------------------------------------------
+# --- State director keys ------------------------------------------
+# ------------------------------------------------------------------
+
+
+# --- DET
+@serializable_enum("reduction_mode", "rescale", "shift", "rescale_fit", "shift_fit", "correction_x", "correction_y",
+                   "correction_z", "correction_rotation", "correction_radius", "correction_translation",
+                   "correction_x_tilt", "correction_y_tilt")
+class DetectorId(object):
+    pass
+
+
+# --- LIMITS
+@serializable_enum("angle", "events_binning", "events_binning_range", "radius_cut", "wavelength_cut", "radius", "q",
+                   "qxy", "wavelength")
+class LimitsId(object):
+    pass
+
+
+# --- MASK
+@serializable_enum("line", "time", "time_detector", "clear_detector_mask", "clear_time_mask", "single_spectrum_mask",
+                   "spectrum_range_mask", "vertical_single_strip_mask", "vertical_range_strip_mask", "file",
+                   "horizontal_single_strip_mask", "horizontal_range_strip_mask", "block", "block_cross")
+class MaskId(object):
+    pass
+
+
+# --- SAMPLE
+@serializable_enum("path", "offset")
+class SampleId(object):
+    pass
+
+
+# --- SET
+@serializable_enum("scales", "centre")
+class SetId(object):
+    pass
+
+
+# --- TRANS
+@serializable_enum("spec", "spec_shift", "radius", "roi", "mask", "sample_workspace", "can_workspace")
+class TransId(object):
+    pass
+
+
+# --- TUBECALIBFILE
+@serializable_enum("file")
+class TubeCalibrationFileId(object):
+    pass
+
+
+# -- QRESOLUTION
+@serializable_enum("on", "delta_r", "collimation_length", "a1", "a2", "h1", "w1", "h2", "w2", "moderator")
+class QResolutionId(object):
+    pass
+
+
+# --- FIT
+@serializable_enum("clear", "monitor_times", "general")
+class FitId(object):
+    pass
+
+
+# --- GRAVITY
+@serializable_enum("on_off", "extra_length")
+class GravityId(object):
+    pass
+
+
+# --- MON
+@serializable_enum("length", "direct", "flat", "hab", "spectrum", "spectrum_trans", "interpolate")
+class MonId(object):
+    pass
+
+
+# --- PRINT
+@serializable_enum("print_line")
+class PrintId(object):
+    pass
+
+
+# -- BACK
+@serializable_enum("all_monitors", "single_monitors", "monitor_off", "trans")
+class BackId(object):
+    pass
+
+
+# -- OTHER - not settable in user file
+@serializable_enum("reduction_dimensionality", "use_full_wavelength_range", "event_slices",
+                   "use_compatibility_mode")
+class OtherId(object):
+    pass
diff --git a/scripts/SANS/sans/user_file/user_file_parser.py b/scripts/SANS/sans/user_file/user_file_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb49bc4c125ab8eff52ba89ce555eda3b43dfb39
--- /dev/null
+++ b/scripts/SANS/sans/user_file/user_file_parser.py
@@ -0,0 +1,2058 @@
+# pylint: disable=too-many-lines, invalid-name, too-many-instance-attributes, too-many-branches, too-few-public-methods
+
+import abc
+import re
+from math import copysign
+
+
+from sans.common.enums import (ISISReductionMode, DetectorType, RangeStepType, FitType, DataType)
+from sans.user_file.user_file_common import (DetectorId, BackId, range_entry, back_single_monitor_entry,
+                                             single_entry_with_detector, mask_angle_entry, LimitsId,
+                                             simple_range, complex_range, MaskId, mask_block, mask_block_cross,
+                                             mask_line, range_entry_with_detector, SampleId, SetId, set_scales_entry,
+                                             position_entry, TransId, TubeCalibrationFileId, QResolutionId, FitId,
+                                             fit_general, MonId, monitor_length, monitor_file, GravityId,
+                                             monitor_spectrum, PrintId, det_fit_range)
+
+
+# -----------------------------------------------------------------
+# --- Free Functions     ------------------------------------------
+# -----------------------------------------------------------------
+def convert_string_to_float(to_convert):
+    return float(to_convert.strip())
+
+
+def convert_string_to_integer(to_convert):
+    return int(to_convert.strip())
+
+
+def extract_range(to_extract, converter):
+    # Remove leading and trailing whitespace
+    to_extract = to_extract.strip()
+    # Collapse multiple central whitespaces to a single one
+    to_extract = ' '.join(to_extract.split())
+
+    entries_string = to_extract.split()
+    number_of_entries = len(entries_string)
+    if number_of_entries != 2:
+        raise RuntimeError("Expected a range defined by two numbers,"
+                           " but instead received {0}".format(number_of_entries))
+
+    return [converter(entries_string[0]),
+            converter(entries_string[1])]
+
+
+def extract_float_range(to_extract):
+    return extract_range(to_extract, convert_string_to_float)
+
+
+def extract_int_range(to_extract):
+    return extract_range(to_extract, convert_string_to_integer)
+
+
+def extract_list(to_extract, separator, converter):
+    to_extract = to_extract.strip()
+    to_extract = ' '.join(to_extract.split())
+    string_list = [element.replace(" ", "") for element in to_extract.split(separator)]
+    return [converter(element) for element in string_list]
+
+
+def extract_float_list(to_extract, separator=","):
+    return extract_list(to_extract, separator, convert_string_to_float)
+
+
+def extract_string_list(to_extract, separator=","):
+    return extract_list(to_extract, separator, lambda x: x)
+
+
+def extract_float_range_midpoint_and_steps(to_extract, separator):
+    to_extract = ' '.join(to_extract.split())
+
+    entries_string = to_extract.split(separator)
+    number_of_entries = len(entries_string)
+    if number_of_entries != 5:
+        raise RuntimeError("Expected a range defined by 5 numbers,"
+                           " but instead received {0}".format(number_of_entries))
+
+    return [convert_string_to_float(entries_string[0]),
+            convert_string_to_float(entries_string[1]),
+            convert_string_to_float(entries_string[2]),
+            convert_string_to_float(entries_string[3]),
+            convert_string_to_float(entries_string[4])]
+
+
+def does_pattern_match(compiled_regex, line):
+    return compiled_regex.match(line) is not None
+
+
+def escape_special_characters_for_file_path(to_escape):
+    escape = {"\a": "\\a", "\b": "\\b", r"\c": "\\c", "\f": "\\f",
+              "\n": "\\n", "\r": "\\r", "\t": "\\t", "\v": "\\v"}
+    keys = list(escape.keys())
+    escaped = to_escape
+    for key in keys:
+        escaped = escaped.replace(key, escape[key])
+    escaped = escaped.replace("\\", "/")
+    return escaped
+
+
+# -----------------------------------------------------------------
+# --- Common Regex Strings-----------------------------------------
+# -----------------------------------------------------------------
+float_number = "[-+]?(\\d*[.])?\\d+"
+integer_number = "[-+]?\\d+"
+positive_float_number = "[+]?(\\d*[.])?\\d+"
+start_string = "^\\s*"
+end_string = "\\s*$"
+space_string = "\\s+"
+rebin_string = "(\\s*[-+]?\\d+(\\.\\d+)?)(\\s*,\\s*[-+]?\\d+(\\.\\d+)?)*"
+
+
+# ----------------------------------------------------------------
+# --- Parsers ----------------------------------------------------
+# ----------------------------------------------------------------
+class UserFileComponentParser(object):
+    separator_dash = "/"
+    separator_space = "\\s"
+    separator_equal = "="
+
+    @abc.abstractmethod
+    def parse_line(self, line):
+        pass
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type():
+        pass
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        pass
+
+    @staticmethod
+    def get_settings(line, command_pattern):
+        line = line.strip()
+        line = line.upper()
+        setting = re.sub(command_pattern, "", line)
+        setting = setting.strip()
+        return setting.upper()
+
+
+class BackParser(UserFileComponentParser):
+    """
+    The BackParser handles the following structure
+        Command | Qualifier   | Parameter
+        BACK    / MON/TIMES     t1 t2
+        BACK    / M m/TIMES      t1 t2
+        BACK    / M m            t1 t2
+        BACK    / M m/OFF
+        BACK    / TRANS          t1 t2
+    """
+    Type = "BACK"
+
+    def __init__(self):
+        super(BackParser, self).__init__()
+
+        # General
+        self._times = "\\s*/\\s*TIMES"
+
+        # All Monitors
+        self._all_mons = "\\s*MON\\s*/\\s*TIMES\\s*"
+        self._all_mons_pattern = re.compile(start_string + self._all_mons + space_string + float_number +
+                                            space_string + float_number + end_string)
+
+        # Single Monitor
+        self._mon_id = "M"
+        self._single_monitor = "\\s*" + self._mon_id + integer_number + "\\s*"
+        self._single_monitor_pattern = re.compile(start_string + self._single_monitor +
+                                                  "(\\s*" + self._times + "\\s*)?" + space_string + float_number +
+                                                  space_string + float_number + end_string)
+
+        # Off
+        self._off_pattern = re.compile(start_string + self._single_monitor + "\\s*/\\s*OFF\\s*" + end_string)
+
+        # Trans
+        self._trans = "TRANS"
+        self._trans_pattern = re.compile(start_string + self._trans + space_string + float_number +
+                                         space_string + float_number)
+
+    def parse_line(self, line):
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, BackParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting
+        if self._is_all_mon(setting):
+            output = self._extract_all_mon(setting)
+        elif self._is_single_mon(setting):
+            output = self._extract_single_mon(setting)
+        elif self._is_off(setting):
+            output = self._extract_off(setting)
+        elif self._is_trans(setting):
+            output = self._extract_trans(setting)
+        else:
+            raise RuntimeError("BackParser: Unknown command for BACK: {0}".format(line))
+        return output
+
+    def _is_all_mon(self, line):
+        return does_pattern_match(self._all_mons_pattern, line)
+
+    def _is_single_mon(self, line):
+        return does_pattern_match(self._single_monitor_pattern, line)
+
+    def _is_off(self, line):
+        return does_pattern_match(self._off_pattern, line)
+
+    def _is_trans(self, line):
+        return does_pattern_match(self._trans_pattern, line)
+
+    def _extract_all_mon(self, line):
+        all_mons_string = re.sub(self._all_mons, "", line)
+        time_range = extract_float_range(all_mons_string)
+        return {BackId.all_monitors: range_entry(start=time_range[0], stop=time_range[1])}
+
+    def _extract_single_mon(self, line):
+        monitor_number = self._get_monitor_number(line)
+        single_string = re.sub(self._times, "", line)
+        all_mons_string = re.sub(self._single_monitor, "", single_string)
+        time_range = extract_float_range(all_mons_string)
+        return {BackId.single_monitors: back_single_monitor_entry(monitor=monitor_number, start=time_range[0],
+                                                                  stop=time_range[1])}
+
+    def _extract_off(self, line):
+        monitor_number = self._get_monitor_number(line)
+        return {BackId.monitor_off: monitor_number}
+
+    def _extract_trans(self, line):
+        trans_string = re.sub(self._trans, "", line)
+        time_range = extract_float_range(trans_string)
+        return {BackId.trans: range_entry(start=time_range[0], stop=time_range[1])}
+
+    def _get_monitor_number(self, line):
+        monitor_selection = re.search(self._single_monitor, line).group(0)
+        monitor_selection = monitor_selection.strip()
+        monitor_number_string = re.sub(self._mon_id, "", monitor_selection)
+        return convert_string_to_integer(monitor_number_string)
+
    @staticmethod
    def get_type():
        # The user-file command keyword handled by this parser ("BACK").
        return BackParser.Type
+
    @staticmethod
    @abc.abstractmethod
    def get_type_pattern():
        # Regex matching the "BACK /" command prefix that precedes the setting.
        # NOTE(review): @abc.abstractmethod on a concrete override is unusual; it
        # mirrors the other parsers in this file, but it would keep the class
        # abstract if the base class used ABCMeta -- confirm intent.
        return "\\s*" + BackParser.get_type() + "\\s*/\\s*"
+
+
class DetParser(UserFileComponentParser):
    """
    The DetParser handles the following structure
        1) Corrections:
            DET/CORR/FRONT/qualifier [parameter]
            DET/CORR/REAR/qualifier [parameter]
              qualifiers are:
              X , Y, Z, ROT, RADIUS, SIDE, XTILT, YTILT

            Note that illegally the combination DET/CORR FRONT qualifier is accepted by the old ISIS SANS reduction
            code, therefore we need to support it here

        2) Reduction Mode
            DET/FRONT
            DET/REAR
            DET/BOTH
            DET/MERGED
            DET/MERGE
            DET/MAIN
            DET/HAB

        3) Settings for merged operation
            DET/RESCALE rescale
            DET/SHIFT shift
            DET/RESCALE/FIT [Q1 Q2]
            DET/SHIFT/FIT [Q1 Q2]
    """
    Type = "DET"

    def __init__(self):
        """Compile the regular expressions which recognise the DET command variants."""
        super(DetParser, self).__init__()
        # Reduction mode
        self._HAB = ["FRONT", "HAB"]
        self._LAB = ["REAR", "MAIN"]
        self._BOTH = ["BOTH"]
        self._MERGE = ["MERGE", "MERGED"]
        self._reduction_mode = []
        self._reduction_mode.extend(self._BOTH)
        self._reduction_mode.extend(self._LAB)
        self._reduction_mode.extend(self._HAB)
        self._reduction_mode.extend(self._MERGE)

        # Corrections
        self._x = "\\s*X\\s*"
        self._x_pattern = re.compile(start_string + self._x + space_string + float_number + end_string)
        self._y = "\\s*Y\\s*"
        self._y_pattern = re.compile(start_string + self._y + space_string + float_number + end_string)
        self._z = "\\s*Z\\s*"
        self._z_pattern = re.compile(start_string + self._z + space_string + float_number + end_string)
        self._rotation = "\\s*ROT\\s*"
        self._rotation_pattern = re.compile(start_string + self._rotation + space_string + float_number + end_string)
        self._translation = "\\s*SIDE\\s*"
        self._translation_pattern = re.compile(start_string + self._translation + space_string +
                                               float_number + end_string)
        self._x_tilt = "\\s*XTILT\\s*"
        self._x_tilt_pattern = re.compile(start_string + self._x_tilt + space_string + float_number + end_string)
        self._y_tilt = "\\s*YTILT\\s*"
        self._y_tilt_pattern = re.compile(start_string + self._y_tilt + space_string + float_number + end_string)

        self._radius = "\\s*RADIUS\\s*"
        self._radius_pattern = re.compile(start_string + self._radius + space_string + float_number + end_string)
        # The "/" between CORR and the detector is optional ("[/]?") to tolerate the
        # illegal-but-accepted "DET/CORR FRONT ..." form mentioned in the class docstring.
        self._correction_lab = "\\s*CORR\\s*[/]?\\s*REAR\\s*[/]?\\s*"
        self._correction_hab = "\\s*CORR\\s*[/]?\\s*FRONT\\s*[/]?\\s*"
        self._correction_LAB_pattern = re.compile(start_string + self._correction_lab)
        self._correction_HAB_pattern = re.compile(start_string + self._correction_hab)

        # Merge options
        self._rescale = "\\s*RESCALE\\s*"
        self._rescale_pattern = re.compile(start_string + self._rescale + space_string + float_number + end_string)
        self._shift = "\\s*SHIFT\\s*"
        self._shift_pattern = re.compile(start_string + self._shift + space_string + float_number + end_string)
        self._rescale_fit = "\\s*RESCALE\\s*/\\s*FIT\\s*"
        self._rescale_fit_pattern = re.compile(start_string + self._rescale_fit + space_string +
                                               float_number + space_string +
                                               float_number + end_string)
        self._shift_fit = "\\s*SHIFT\\s*/\\s*FIT\\s*"
        self._shift_fit_pattern = re.compile(start_string + self._shift_fit + space_string +
                                             float_number + space_string +
                                             float_number + end_string)

    def parse_line(self, line):
        """Parse a single DET user-file line and return the corresponding setting dict."""
        # Get the settings, ie remove command
        setting = UserFileComponentParser.get_settings(line, DetParser.get_type_pattern())

        # Determine the qualifier and extract the user setting
        if self._is_reduction_mode_setting(setting):
            output = self._extract_reduction_mode(setting)
        elif self._is_correction_setting(setting):
            output = self._extract_correction(setting)
        elif self._is_merge_option_setting(setting):
            output = self._extract_merge_option(setting)
        else:
            raise RuntimeError("DetParser: Unknown command for DET: {0}".format(line))
        return output

    def _is_reduction_mode_setting(self, line):
        # A reduction-mode setting begins with one of the known mode keywords
        # (FRONT/HAB/REAR/MAIN/BOTH/MERGE/MERGED).
        front_element = line.split(UserFileComponentParser.separator_dash, 1)[0]
        return front_element in self._reduction_mode

    def _is_correction_setting(self, line):
        # True when the line is a detector position/rotation correction.
        return does_pattern_match(self._correction_HAB_pattern, line) or \
               does_pattern_match(self._correction_LAB_pattern, line)

    def _is_merge_option_setting(self, line):
        # True when the line configures the merged-reduction rescale/shift options.
        return does_pattern_match(self._rescale_pattern, line) or \
               does_pattern_match(self._shift_pattern, line) or \
               does_pattern_match(self._rescale_fit_pattern, line) or \
               does_pattern_match(self._shift_fit_pattern, line)

    def _extract_reduction_mode(self, line):
        """Map a reduction-mode keyword to the corresponding ISISReductionMode value."""
        line_capital = line.upper()
        if line_capital in self._HAB:
            return {DetectorId.reduction_mode: ISISReductionMode.HAB}
        elif line_capital in self._LAB:
            return {DetectorId.reduction_mode: ISISReductionMode.LAB}
        elif line_capital in self._BOTH:
            return {DetectorId.reduction_mode: ISISReductionMode.All}
        elif line_capital in self._MERGE:
            return {DetectorId.reduction_mode: ISISReductionMode.Merged}
        else:
            raise RuntimeError("DetParser:  Could not extract line: {0}".format(line))

    def _extract_correction(self, line):
        """Determine the detector bank of a correction and delegate to the qualifier parser."""
        if self._correction_HAB_pattern.match(line) is not None:
            qualifier = re.sub(self._correction_hab, "", line)
            qualifier = qualifier.strip()
            return self._extract_detector_setting(qualifier, DetectorType.HAB)
        elif self._correction_LAB_pattern.match(line) is not None:
            qualifier = re.sub(self._correction_lab, "", line)
            qualifier = qualifier.strip()
            return self._extract_detector_setting(qualifier, DetectorType.LAB)
        else:
            raise RuntimeError("DetParser: Could not extract line: {0}".format(line))

    def _extract_detector_setting(self, qualifier, detector_type):
        """Extract the correction value for the given qualifier (X, Y, Z, ROT, ...).

        The qualifier has already been stripped of the CORR/detector prefix;
        detector_type records which bank the correction applies to.
        """
        if self._x_pattern.match(qualifier):
            value_string = re.sub(self._x, "", qualifier)
            key = DetectorId.correction_x
        elif self._y_pattern.match(qualifier):
            value_string = re.sub(self._y, "", qualifier)
            key = DetectorId.correction_y
        elif self._z_pattern.match(qualifier):
            value_string = re.sub(self._z, "", qualifier)
            key = DetectorId.correction_z
        elif self._rotation_pattern.match(qualifier):
            value_string = re.sub(self._rotation, "", qualifier)
            key = DetectorId.correction_rotation
        elif self._translation_pattern.match(qualifier):
            value_string = re.sub(self._translation, "", qualifier)
            key = DetectorId.correction_translation
        elif self._radius_pattern.match(qualifier):
            value_string = re.sub(self._radius, "", qualifier)
            key = DetectorId.correction_radius
        elif self._x_tilt_pattern.match(qualifier):
            value_string = re.sub(self._x_tilt, "", qualifier)
            key = DetectorId.correction_x_tilt
        elif self._y_tilt_pattern.match(qualifier):
            value_string = re.sub(self._y_tilt, "", qualifier)
            key = DetectorId.correction_y_tilt
        else:
            raise RuntimeError("DetParser: Unknown qualifier encountered: {0}".format(qualifier))

        # Qualify the key with the selected detector
        value_string = value_string.strip()
        value = convert_string_to_float(value_string)
        return {key: single_entry_with_detector(entry=value, detector_type=detector_type)}

    def _extract_merge_option(self, line):
        """Extract rescale/shift values or their fit ranges for merged reduction.

        For the FIT variants an empty remainder means "fit without an explicit
        Q range", encoded as a det_fit_range with None bounds.
        """
        if self._rescale_pattern.match(line) is not None:
            rescale_string = re.sub(self._rescale, "", line)
            rescale = convert_string_to_float(rescale_string)
            return {DetectorId.rescale: rescale}
        elif self._shift_pattern.match(line) is not None:
            shift_string = re.sub(self._shift, "", line)
            shift = convert_string_to_float(shift_string)
            return {DetectorId.shift: shift}
        elif self._rescale_fit_pattern.match(line) is not None:
            rescale_fit_string = re.sub(self._rescale_fit, "", line)
            if rescale_fit_string:
                rescale_fit = extract_float_range(rescale_fit_string)
                value = det_fit_range(start=rescale_fit[0], stop=rescale_fit[1], use_fit=True)
            else:
                value = det_fit_range(start=None, stop=None, use_fit=True)
            return {DetectorId.rescale_fit: value}
        elif self._shift_fit_pattern.match(line) is not None:
            shift_fit_string = re.sub(self._shift_fit, "", line)
            if shift_fit_string:
                shift_fit = extract_float_range(shift_fit_string)
                value = det_fit_range(start=shift_fit[0], stop=shift_fit[1], use_fit=True)
            else:
                value = det_fit_range(start=None, stop=None, use_fit=True)
            return {DetectorId.shift_fit: value}
        else:
            raise RuntimeError("DetParser: Could not extract line: {0}".format(line))

    @staticmethod
    def get_type():
        # The user-file command keyword handled by this parser ("DET").
        return DetParser.Type

    @staticmethod
    @abc.abstractmethod
    def get_type_pattern():
        # Regex matching the "DET /" command prefix that precedes the setting.
        # NOTE(review): @abc.abstractmethod on a concrete override mirrors the other
        # parsers in this file -- confirm intent.
        return "\\s*" + DetParser.get_type() + "\\s*/\\s*"
+
+
class LimitParser(UserFileComponentParser):
    """
    The LimitParser handles the following structure for
        L/PHI[/NOMIRROR] d1 d2

        L/Q/ q1 q2 [dq[/LIN]]  or  L/Q q1 q2 [dq[/LOG]]
        L/Q q1,dq1,q3,dq2,q2 [/LIN]]  or  L/Q q1,dq1,q3,dq2,q2 [/LOG]]

        L/Q/RCut c
        L/Q/WCut c

        L/QXY qxy1 qxy2 [dqxy[/LIN]]  or  L/QXY qxy1 qxy2 [dqxy[/LOG]]
        L/QXY qxy1,dqxy1,qxy3,dqxy2,qxy2 [/LIN]]  or  L/QXY qxy1,dqxy1,qxy3,dqxy2,qxy2 [/LOG]]

        L/R r1 r2  or undocumented L/R  r1 r2 step where step is actually ignored

        L/WAV l1 l2 [dl[/LIN]  or  L/WAV l1 l2 [dl[/LOG]
        L/WAV l1,dl1,l3,dl2,l2 [/LIN]  or  L/WAV l1,dl1,l3,dl2,l2 [/LOG]

        L/EVENTSTIME rebin_str
    """
    Type = "L"

    def __init__(self):
        """Compile the regular expressions which recognise the L (limits) command variants."""
        super(LimitParser, self).__init__()

        # ranges
        self._lin = "\\s*/\\s*LIN\\s*"
        self._log = "\\s*/\\s*LOG\\s*"
        self._lin_or_log = self._lin + "|" + self._log
        self._simple_step = "(\\s+" + float_number + "\\s*(" + self._lin_or_log + ")?)?"
        self._range = float_number + "\\s+" + float_number
        self._simple_range = "\\s*" + self._range + self._simple_step

        self._comma = "\\s*,\\s*"
        self._complex_range = "\\s*" + float_number + self._comma + float_number + self._comma + float_number + \
                              self._comma + float_number + self._comma + float_number +\
                              "(\\s*" + self._lin_or_log + ")?"

        # Angle limits
        self._phi_no_mirror = "\\s*/\\s*NOMIRROR\\s*"
        self._phi = "\\s*PHI\\s*(" + self._phi_no_mirror + ")?\\s*"
        self._phi_pattern = re.compile(start_string + self._phi + space_string +
                                       float_number + space_string +
                                       float_number + end_string)

        # Event time limits
        self._events_time = "\\s*EVENTSTIME\\s*"
        self._events_time_pattern = re.compile(start_string + self._events_time +
                                               space_string + rebin_string + end_string)

        self._events_time_pattern_simple_pattern = re.compile(start_string + self._events_time +
                                                              space_string + self._simple_range + end_string)

        # Q Limits
        self._q = "\\s*Q\\s*"
        self._q_simple_pattern = re.compile(start_string + self._q + space_string +
                                            self._simple_range + end_string)
        self._q_complex_pattern = re.compile(start_string + self._q + space_string + self._complex_range + end_string)

        # Qxy limits
        self._qxy = "\\s*QXY\\s*"
        self._qxy_simple_pattern = re.compile(start_string + self._qxy + space_string + self._simple_range + end_string)
        self._qxy_complex_pattern = re.compile(start_string + self._qxy + space_string +
                                               self._complex_range + end_string)

        # Wavelength limits
        self._wavelength = "\\s*WAV\\s*"
        self._wavelength_simple_pattern = re.compile(start_string + self._wavelength + space_string +
                                                     self._simple_range + end_string)
        self._wavelength_complex_pattern = re.compile(start_string + self._wavelength + space_string +
                                                      self._complex_range + end_string)

        # Cut limits
        self._radius_cut = "\\s*Q\\s*/\\s*RCUT\\s*"
        self._radius_cut_pattern = re.compile(start_string + self._radius_cut + space_string +
                                              float_number + end_string)
        self._wavelength_cut = "\\s*Q\\s*/\\s*WCUT\\s*"
        self._wavelength_cut_pattern = re.compile(start_string + self._wavelength_cut +
                                                  space_string + float_number + end_string)

        # Radius limits
        # Note that we have to account for an undocumented potential step size (which is ignored
        self._radius = "\\s*R\\s*"
        self._radius_string = start_string + self._radius + space_string + float_number + space_string + float_number +\
                              "\\s*(" + float_number + ")?\\s*" + end_string  # noqa
        self._radius_pattern = re.compile(self._radius_string)

    def parse_line(self, line):
        """Parse a single L user-file line and return the corresponding setting dict."""
        # Get the settings, ie remove command
        setting = UserFileComponentParser.get_settings(line, LimitParser.get_type_pattern())

        # Determine the qualifier and extract the user setting.
        # The order of the checks matters since several patterns overlap
        # (e.g. Q/RCUT would also partially match the plain Q patterns).
        if self._is_angle_limit(setting):
            output = self._extract_angle_limit(setting)
        elif self._is_event_binning(setting):
            output = self._extract_event_binning(setting)
        elif self._is_cut_limit(setting):
            output = self._extract_cut_limit(setting)
        elif self._is_radius_limit(setting):
            output = self._extract_radius_limit(setting)
        elif self._is_q_limit(setting):
            output = self._extract_q_limit(setting)
        elif self._is_wavelength_limit(setting):
            output = self._extract_wavelength_limit(setting)
        elif self._is_qxy_limit(setting):
            output = self._extract_qxy_limit(setting)
        else:
            raise RuntimeError("LimitParser: Unknown command for L: {0}".format(line))
        return output

    def _is_angle_limit(self, line):
        # True for PHI angle limits.
        return does_pattern_match(self._phi_pattern, line)

    def _is_event_binning(self, line):
        # True for EVENTSTIME binning, either as a full rebin string or a simple range.
        return does_pattern_match(self._events_time_pattern, line) or \
               does_pattern_match(self._events_time_pattern_simple_pattern, line)

    def _is_cut_limit(self, line):
        # True for Q/RCUT or Q/WCUT settings.
        return does_pattern_match(self._radius_cut_pattern, line) or \
               does_pattern_match(self._wavelength_cut_pattern, line)

    def _is_radius_limit(self, line):
        # True for R (radius) limits.
        return does_pattern_match(self._radius_pattern, line)

    def _is_q_limit(self, line):
        # True for Q limits in either the simple or the comma-separated complex form.
        return does_pattern_match(self._q_simple_pattern, line) or does_pattern_match(self._q_complex_pattern, line)

    def _is_qxy_limit(self, line):
        # True for QXY limits in either the simple or the complex form.
        return does_pattern_match(self._qxy_simple_pattern, line) or does_pattern_match(self._qxy_complex_pattern, line)

    def _is_wavelength_limit(self, line):
        # True for WAV limits in either the simple or the complex form.
        return does_pattern_match(self._wavelength_simple_pattern, line) or\
               does_pattern_match(self._wavelength_complex_pattern, line)

    def _extract_angle_limit(self, line):
        """Extract the PHI angle range; the optional /NOMIRROR qualifier disables mirroring."""
        use_mirror = re.search(self._phi_no_mirror, line) is None
        angles_string = re.sub(self._phi, "", line)
        angles = extract_float_range(angles_string)
        return {LimitsId.angle: mask_angle_entry(min=angles[0], max=angles[1], use_mirror=use_mirror)}

    def _extract_event_binning(self, line):
        """Extract the event-time binning as a "start,step,stop" rebin string.

        A simple range is normalised into a rebin string; a negative step encodes
        logarithmic binning, matching the Mantid Rebin convention.
        """
        event_binning = re.sub(self._events_time, "", line)
        if does_pattern_match(self._events_time_pattern_simple_pattern, line):
            simple_pattern = self._extract_simple_pattern(event_binning, LimitsId.events_binning)
            rebin_values = simple_pattern[LimitsId.events_binning]
            prefix = -1. if rebin_values.step_type is RangeStepType.Log else 1.
            binning_string = str(rebin_values.start) + "," + str(prefix*rebin_values.step) + "," + str(rebin_values.stop)  # noqa
        else:
            rebin_values = extract_float_list(event_binning)
            binning_string = ",".join([str(val) for val in rebin_values])
        output = {LimitsId.events_binning: binning_string}
        return output

    def _extract_cut_limit(self, line):
        """Extract a radius (RCUT) or wavelength (WCUT) cut value."""
        if self._radius_cut_pattern.match(line) is not None:
            key = LimitsId.radius_cut
            limit_value = re.sub(self._radius_cut, "", line)
        else:
            key = LimitsId.wavelength_cut
            limit_value = re.sub(self._wavelength_cut, "", line)
        return {key: convert_string_to_float(limit_value)}

    def _extract_radius_limit(self, line):
        """Extract the radius range; any undocumented trailing step value is ignored."""
        radius_range_string = re.sub(self._radius, "", line)
        radius_range = extract_float_list(radius_range_string, separator=" ")
        return {LimitsId.radius: range_entry(start=radius_range[0], stop=radius_range[1])}

    def _extract_q_limit(self, line):
        """Extract Q limits, dispatching on simple vs complex (multi-step) form."""
        q_range = re.sub(self._q, "", line)
        if does_pattern_match(self._q_simple_pattern, line):
            output = self._extract_simple_pattern(q_range, LimitsId.q)
        else:
            output = self._extract_complex_pattern(q_range, LimitsId.q)
        return output

    def _extract_qxy_limit(self, line):
        """Extract QXY limits, dispatching on simple vs complex (multi-step) form."""
        qxy_range = re.sub(self._qxy, "", line)
        if does_pattern_match(self._qxy_simple_pattern, line):
            output = self._extract_simple_pattern(qxy_range, LimitsId.qxy)
        else:
            output = self._extract_complex_pattern(qxy_range, LimitsId.qxy)
        return output

    def _extract_wavelength_limit(self, line):
        """Extract WAV limits; only the simple form is supported."""
        wavelength_range = re.sub(self._wavelength, "", line)
        if does_pattern_match(self._wavelength_simple_pattern, line):
            output = self._extract_simple_pattern(wavelength_range, LimitsId.wavelength)
        else:
            # This is not implemented in the old parser, hence disable here
            # output = self._extract_complex_pattern(wavelength_range, LimitsId.wavelength)
            raise ValueError("Wavelength Limits: The expression {0} is currently not supported."
                             " Use a simple pattern.".format(line))
        return output

    def _extract_simple_pattern(self, simple_range_input, tag):
        """Extract "start stop [step[/LIN|/LOG]]" into a simple_range entry under tag."""
        # Strip the leading "start stop" pair once; whatever remains is the optional step.
        range_removed = re.sub(self._range, "", simple_range_input, 1)
        if range_removed == "":
            float_range = extract_float_range(simple_range_input)
            output = {tag: simple_range(start=float_range[0],
                                        stop=float_range[1],
                                        step=None,
                                        step_type=None)}
        else:
            # Get the step type (defaults to LIN when no explicit qualifier is present)
            step_type = self._get_step_type(range_removed)

            # Get the step
            step_string = re.sub(self._lin_or_log, "", range_removed)
            step = convert_string_to_float(step_string)

            # Get the range by removing the step part from the input. The step text is
            # user data, so it must be escaped before being used as a regex pattern --
            # otherwise metacharacters in it (e.g. the "+" in "1e+2") would make the
            # substitution silently fail or over-match.
            pure_range = re.sub(re.escape(range_removed), "", simple_range_input)
            float_range = extract_float_range(pure_range)
            output = {tag: simple_range(start=float_range[0],
                                        stop=float_range[1],
                                        step=step,
                                        step_type=step_type)}
        return output

    def _extract_complex_pattern(self, complex_range_input, tag):
        """Extract "q1,dq1,q3,dq2,q2[/LIN|/LOG]" into a complex_range entry under tag."""
        # Get the step type; None means "derive it from the sign of each step below".
        step_type = self._get_step_type(complex_range_input, default=None)

        # Remove the step type
        range_with_steps_string = re.sub(self._lin_or_log, "", complex_range_input)
        range_with_steps = extract_float_range_midpoint_and_steps(range_with_steps_string, ",")

        # Check if there is a sign on the individual steps, this shows if something had been marked as linear or log.
        # If there is an explicit LOG/LIN command, then this overwrites the sign
        step_type1 = RangeStepType.Log if copysign(1, range_with_steps[1]) == -1 else RangeStepType.Lin
        step_type2 = RangeStepType.Log if copysign(1, range_with_steps[3]) == -1 else RangeStepType.Lin
        if step_type is not None:
            step_type1 = step_type
            step_type2 = step_type

        return {tag: complex_range(start=range_with_steps[0],
                                   step1=abs(range_with_steps[1]),
                                   mid=range_with_steps[2],
                                   step2=abs(range_with_steps[3]),
                                   stop=range_with_steps[4],
                                   step_type1=step_type1,
                                   step_type2=step_type2)}

    def _get_step_type(self, range_string, default=RangeStepType.Lin):
        """Return Log/Lin depending on the explicit qualifier, or default when absent."""
        range_type = default
        if re.search(self._log, range_string):
            range_type = RangeStepType.Log
        elif re.search(self._lin, range_string):
            range_type = RangeStepType.Lin
        return range_type

    @staticmethod
    def get_type():
        # The user-file command keyword handled by this parser ("L").
        return LimitParser.Type

    @staticmethod
    @abc.abstractmethod
    def get_type_pattern():
        # Regex matching the "L /" command prefix that precedes the setting.
        return "\\s*" + LimitParser.get_type() + "\\s*/\\s*"
+
+
+class MaskParser(UserFileComponentParser):
+    """
+    The MaskParser handles the following structure for
+        MASK/CLEAR[/TIME]
+
+        MASK[/REAR/FRONT/HAB] Hn[>Hm]  or  MASK Vn[>Vm]  - to mask single wires or 'strips'
+        MASK[/REAR/FRONT/HAB] Hn>Hm+Vn>Vm or Vn>Vm+Hn>Hm - to mask a rectangular 'box'
+        MASK[/REAR/FRONT/HAB] Hn+Vm or Vm+Hn  - to mask the intersection of Hn and Vm
+
+        MASK Ssp1[>Ssp2]
+
+        MASK[/REAR/FRONT/HAB]/TIME t1 t2 or  MASK[/REAR/FRONT/HAB]/T t1 t2 - if no detector is specfied, then mask
+                                                                             is applied to both detectors.
+
+        MASK/LINE width angle [x y]
+    """
+    Type = "MASK"
+
    def __init__(self):
        """Compile the regular expressions which recognise the MASK command variants."""
        super(MaskParser, self).__init__()
        self._time = "\\s*/\\s*TIME\\s*"

        # ranges
        self._two_floats = "\\s*" + float_number + space_string + float_number + "\\s*"
        self._optional_two_floats = "(\\s+" + self._two_floats + "\\s*)?\\s*"
        self._range = "\\s*>\\s*"

        # Line Mask
        self._line = "\\s*LINE\\s*"
        self._line_pattern = re.compile(start_string + self._line + space_string + self._two_floats +
                                        self._optional_two_floats + end_string)

        # Clear Mask
        self._clear = "\\s*CLEAR\\s*"
        self._clear_pattern = re.compile(start_string + self._clear + "\\s*(" + self._time + ")?" + end_string)

        # Spectrum Mask
        self._spectrum = "\\s*S\\s*"
        self._additional_spectrum = "(\\s*>" + self._spectrum + integer_number+")"
        self._spectrum_range_pattern = re.compile(start_string + self._spectrum + integer_number +
                                                  self._additional_spectrum + end_string)
        self._spectrum_single_pattern = re.compile(start_string + self._spectrum + integer_number + end_string)

        # Strip Masks
        # NOTE(review): these alternations are not parenthesised, so e.g. "HAB|FRONT"
        # binds the surrounding \s* to only one alternative -- confirm this matches
        # the intended inputs.
        self._hab = "\\s*HAB|FRONT\\s*"
        self._lab = "\\s*LAB|REAR|MAIN\\s*"
        self._detector = "\\s*(" + self._hab + "|" + self._lab + ")?\\s*"

        # Vertical strip Mask
        self._v = "\\s*V\\s*"
        self._additional_v = "(\\s*>" + self._v + integer_number + ")"
        self._single_vertical_strip_pattern = re.compile(start_string + self._detector + self._v +
                                                         integer_number + end_string)
        self._range_vertical_strip_pattern = re.compile(start_string + self._detector + self._v +
                                                        integer_number + self._additional_v + end_string)

        # Horizontal strip Mask
        self._h = "\\s*H\\s*"
        self._additional_h = "(\\s*>" + self._h + integer_number + ")"
        self._single_horizontal_strip_pattern = re.compile(start_string + self._detector + self._h +
                                                           integer_number + end_string)
        self._range_horizontal_strip_pattern = re.compile(start_string + self._detector + self._h +
                                                          integer_number + self._additional_h + end_string)

        # Time Mask
        self._time_or_t = "\\s*(TIME|T)\\s*"
        self._detector_time = "\\s*((" + self._hab + "|" + self._lab + ")"+"\\s*/\\s*)?\\s*"
        self._time_pattern = re.compile(start_string + self._detector_time + self._time_or_t + space_string +
                                        self._two_floats + end_string)

        # Block mask
        self._v_plus_h = "\\s*" + self._v + integer_number + "\\s*\\+\\s*" + self._h + integer_number
        self._h_plus_v = "\\s*" + self._h + integer_number + "\\s*\\+\\s*" + self._v + integer_number

        self._vv_plus_hh = self._v + integer_number + self._additional_v + "\\s*\\+\\s*" + self._h + integer_number +\
                           self._additional_h  # noqa
        self._hh_plus_vv = self._h + integer_number + self._additional_h + "\\s*\\+\\s*" + self._v + integer_number +\
                           self._additional_v  # noqa

        self._blocks = "\\s*(" + self._v_plus_h + "|" + self._h_plus_v + "|" +\
                       self._vv_plus_hh + "|" + self._hh_plus_vv + ")\\s*"
        self._block_pattern = re.compile(start_string + self._detector + self._blocks + end_string)
+
+    def parse_line(self, line):
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, MaskParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting
+        if self._is_block_mask(setting):
+            output = self._extract_block_mask(setting)
+        elif self._is_line_mask(setting):
+            output = self._extract_line_mask(setting)
+        elif self._is_time_mask(setting):
+            output = self._extract_time_mask(setting)
+        elif self._is_clear_mask(setting):
+            output = self._extract_clear_mask(setting)
+        elif self._is_single_spectrum_mask(setting):
+            output = self._extract_single_spectrum_mask(setting)
+        elif self._is_spectrum_range_mask(setting):
+            output = self._extract_spectrum_range_mask(setting)
+        elif self._is_vertical_single_strip_mask(setting):
+            output = self._extract_vertical_single_strip_mask(setting)
+        elif self._is_vertical_range_strip_mask(setting):
+            output = self._extract_vertical_range_strip_mask(setting)
+        elif self._is_horizontal_single_strip_mask(setting):
+            output = self._extract_horizontal_single_strip_mask(setting)
+        elif self._is_horizontal_range_strip_mask(setting):
+            output = self._extract_horizontal_range_strip_mask(setting)
+        else:
+            raise RuntimeError("MaskParser: Unknown command for MASK: {0}".format(line))
+        return output
+
+    def _is_block_mask(self, line):
+        return does_pattern_match(self._block_pattern, line)
+
+    def _is_line_mask(self, line):
+        return does_pattern_match(self._line_pattern, line)
+
+    def _is_time_mask(self, line):
+        return does_pattern_match(self._time_pattern, line)
+
+    def _is_clear_mask(self, line):
+        return does_pattern_match(self._clear_pattern, line)
+
+    def _is_single_spectrum_mask(self, line):
+        return does_pattern_match(self._spectrum_single_pattern, line)
+
+    def _is_spectrum_range_mask(self, line):
+        return does_pattern_match(self._spectrum_range_pattern, line)
+
+    def _is_vertical_single_strip_mask(self, line):
+        return does_pattern_match(self._single_vertical_strip_pattern, line)
+
+    def _is_vertical_range_strip_mask(self, line):
+        return does_pattern_match(self._range_vertical_strip_pattern, line)
+
+    def _is_horizontal_single_strip_mask(self, line):
+        return does_pattern_match(self._single_horizontal_strip_pattern, line)
+
+    def _is_horizontal_range_strip_mask(self, line):
+        # True if the setting matches the pre-compiled horizontal strip-range pattern.
+        return does_pattern_match(self._range_horizontal_strip_pattern, line)
+
+    def _extract_block_mask(self, line):
+        # There are four cases that can exist:
+        # 1. Va > Vb + Hc > Hd
+        # 2. Ha > Hb + Vc > Vd
+        # 3. Va + Hb
+        # 4. Ha + Vb
+        # Record and remove detector type
+        detector_type = DetectorType.HAB if re.search(self._hab, line) is not None else DetectorType.LAB
+        block_string = re.sub(self._detector, "", line)
+        is_true_block = ">" in block_string
+        two_blocks = block_string.split("+")
+        horizontal_part = None
+        vertical_part = None
+        if is_true_block:
+            for block in two_blocks:
+                if self._is_vertical_range_strip_mask(block):
+                    prelim_range = self._extract_vertical_range_strip_mask(block)
+                    # Note we use the lab key word since the extraction defaults to lab
+                    vertical_part = prelim_range[MaskId.vertical_range_strip_mask]
+                elif self._is_horizontal_range_strip_mask(block):
+                    prelim_range = self._extract_horizontal_range_strip_mask(block)
+                    # Note we use the lab key word since the extraction defaults to lab
+                    horizontal_part = prelim_range[MaskId.horizontal_range_strip_mask]
+                else:
+                    raise RuntimeError("MaskParser: Cannot handle part of block mask: {0}".format(block))
+            # Now that we have both parts we can assemble the output
+            output = {MaskId.block: mask_block(horizontal1=horizontal_part.start, horizontal2=horizontal_part.stop,
+                                               vertical1=vertical_part.start, vertical2=vertical_part.stop,
+                                               detector_type=detector_type)}
+        else:
+            for block in two_blocks:
+                if self._is_vertical_single_strip_mask(block):
+                    prelim_single = self._extract_vertical_single_strip_mask(block)
+                    # Note we use the lab key word since the extraction defaults to lab
+                    vertical_part = prelim_single[MaskId.vertical_single_strip_mask]
+                elif self._is_horizontal_single_strip_mask(block):
+                    prelim_single = self._extract_horizontal_single_strip_mask(block)
+                    # Note we use the lab key word since the extraction defaults to lab
+                    horizontal_part = prelim_single[MaskId.horizontal_single_strip_mask]
+                else:
+                    raise RuntimeError("MaskParser: Cannot handle part of block cross mask: {0}".format(block))
+            output = {MaskId.block_cross: mask_block_cross(horizontal=horizontal_part.entry,
+                                                           vertical=vertical_part.entry,
+                                                           detector_type=detector_type)}
+        return output
+
+    def _extract_line_mask(self, line):
+        line_string = re.sub(self._line, "", line)
+        line_values = extract_float_list(line_string, " ")
+        length_values = len(line_values)
+        if length_values == 2:
+            output = {MaskId.line: mask_line(width=line_values[0], angle=line_values[1],
+                                             x=None, y=None)}
+        elif length_values == 4:
+            output = {MaskId.line: mask_line(width=line_values[0], angle=line_values[1],
+                                             x=line_values[2], y=line_values[3])}
+        else:
+            raise ValueError("MaskParser: Line mask accepts wither 2 or 4 parameters,"
+                             " but {0} parameters were passed in.".format(length_values))
+        return output
+
+    def _extract_time_mask(self, line):
+        # Check if one of the detectors is found
+        has_hab = re.search(self._hab, line)
+        has_lab = re.search(self._lab, line)
+        if has_hab is not None or has_lab is not None:
+            key = MaskId.time_detector
+            detector_type = DetectorType.HAB if has_hab is not None else DetectorType.LAB
+            regex_string = "\s*(" + self._hab + ")\s*/\s*" if has_hab else "\s*(" + self._lab + ")\s*/\s*"
+            min_and_max_time_range = re.sub(regex_string, "", line)
+        else:
+            key = MaskId.time
+            detector_type = None
+            min_and_max_time_range = line
+        min_and_max_time_range = re.sub(self._time_or_t, "", min_and_max_time_range)
+        min_and_max_time = extract_float_range(min_and_max_time_range)
+        return {key: range_entry_with_detector(start=min_and_max_time[0], stop=min_and_max_time[1],
+                                               detector_type=detector_type)}
+
+    def _extract_clear_mask(self, line):
+        clear_removed = re.sub(self._clear, "", line)
+        return {MaskId.clear_detector_mask: True} if clear_removed == "" else \
+            {MaskId.clear_time_mask: True}
+
+    def _extract_single_spectrum_mask(self, line):
+        single_spectrum_string = re.sub(self._spectrum, "", line)
+        single_spectrum = convert_string_to_integer(single_spectrum_string)
+        return {MaskId.single_spectrum_mask: single_spectrum}
+
+    def _extract_spectrum_range_mask(self, line):
+        spectrum_range_string = re.sub(self._spectrum, "", line)
+        spectrum_range_string = re.sub(self._range, " ", spectrum_range_string)
+        spectrum_range = extract_int_range(spectrum_range_string)
+        return {MaskId.spectrum_range_mask: range_entry(start=spectrum_range[0], stop=spectrum_range[1])}
+
+    def _extract_vertical_single_strip_mask(self, line):
+        detector_type = DetectorType.HAB if re.search(self._hab, line) is not None else DetectorType.LAB
+        single_vertical_strip_string = re.sub(self._detector, "", line)
+        single_vertical_strip_string = re.sub(self._v, "", single_vertical_strip_string)
+        single_vertical_strip = convert_string_to_integer(single_vertical_strip_string)
+        return {MaskId.vertical_single_strip_mask: single_entry_with_detector(entry=single_vertical_strip,
+                                                                              detector_type=detector_type)}
+
+    def _extract_vertical_range_strip_mask(self, line):
+        detector_type = DetectorType.HAB if re.search(self._hab, line) is not None else DetectorType.LAB
+        range_vertical_strip_string = re.sub(self._detector, "", line)
+        range_vertical_strip_string = re.sub(self._v, "", range_vertical_strip_string)
+        range_vertical_strip_string = re.sub(self._range, " ", range_vertical_strip_string)
+        range_vertical_strip = extract_int_range(range_vertical_strip_string)
+        return {MaskId.vertical_range_strip_mask: range_entry_with_detector(start=range_vertical_strip[0],
+                                                                            stop=range_vertical_strip[1],
+                                                                            detector_type=detector_type)}
+
+    def _extract_horizontal_single_strip_mask(self, line):
+        detector_type = DetectorType.HAB if re.search(self._hab, line) is not None else DetectorType.LAB
+        single_horizontal_strip_string = re.sub(self._detector, "", line)
+        single_horizontal_strip_string = re.sub(self._h, "", single_horizontal_strip_string)
+        single_horizontal_strip = convert_string_to_integer(single_horizontal_strip_string)
+        return {MaskId.horizontal_single_strip_mask: single_entry_with_detector(entry=single_horizontal_strip,
+                                                                                detector_type=detector_type)}
+
+    def _extract_horizontal_range_strip_mask(self, line):
+        detector_type = DetectorType.HAB if re.search(self._hab, line) is not None else DetectorType.LAB
+        range_horizontal_strip_string = re.sub(self._detector, "", line)
+        range_horizontal_strip_string = re.sub(self._h, "", range_horizontal_strip_string)
+        range_horizontal_strip_string = re.sub(self._range, " ", range_horizontal_strip_string)
+        range_horizontal_strip = extract_int_range(range_horizontal_strip_string)
+        return {MaskId.horizontal_range_strip_mask: range_entry_with_detector(start=range_horizontal_strip[0],
+                                                                              stop=range_horizontal_strip[1],
+                                                                              detector_type=detector_type)}
+
+    @staticmethod
+    def get_type():
+        # The user-file command keyword handled by this parser.
+        return MaskParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        # Regex matching the MASK command prefix followed by "/" or whitespace.
+        # NOTE(review): abc.abstractmethod on a concrete override is inert only
+        # if the base class does not use ABCMeta -- confirm against the base.
+        return "\\s*" + MaskParser.get_type() + "(\\s*/\\s*|\\s+)"
+
+
+class SampleParser(UserFileComponentParser):
+    """
+    The SampleParser handles the following structure for
+        SAMPLE/OFFSET z1
+        SAMPLE/PATH/ON
+        SAMPLE/PATH/OFF
+    """
+    Type = "SAMPLE"
+
+    def __init__(self):
+        super(SampleParser, self).__init__()
+
+        # Offset
+        self._offset = "\\s*OFFSET\\s*"
+        self._offset_pattern = re.compile(start_string + self._offset + space_string + float_number + end_string)
+
+        # Path
+        self._on = "\\s*ON\\s*"
+        self._off = "\\s*OFF\\s*"
+        self._path = "\\s*PATH\\s*/\\s*"
+        self._path_pattern = re.compile(start_string + self._path + "(" + self._on + "|" + self._off + ")" + end_string)
+
+    def parse_line(self, line):
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, SampleParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting
+        if self._is_sample_path(setting):
+            output = self._extract_sample_path(setting)
+        elif self._is_sample_offset(setting):
+            output = self._extract_sample_offset(setting)
+        else:
+            raise RuntimeError("SampleParser: Unknown command for SAMPLE: {0}".format(line))
+        return output
+
+    def _is_sample_path(self, line):
+        return does_pattern_match(self._path_pattern, line)
+
+    def _is_sample_offset(self, line):
+        return does_pattern_match(self._offset_pattern, line)
+
+    def _extract_sample_path(self, line):
+        value = False if re.search(self._off, line) is not None else True
+        return {SampleId.path: value}
+
+    def _extract_sample_offset(self, line):
+        offset_string = re.sub(self._offset, "", line)
+        offset = convert_string_to_float(offset_string)
+        return {SampleId.offset: offset}
+
+    @staticmethod
+    def get_type():
+        return SampleParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        return "\\s*" + SampleParser.get_type() + "(\\s*/)\\s*"
+
+
+class SetParser(UserFileComponentParser):
+    """
+    The SetParser handles the following structure for
+        SET CENTRE[/MAIN] x y
+        SET CENTRE/HAB x y
+        SET SCALES s a b c d
+    """
+    Type = "SET"
+
+    def __init__(self):
+        super(SetParser, self).__init__()
+
+        # Scales
+        self._scales = "\\s*SCALES\\s*"
+        self._scales_pattern = re.compile(start_string + self._scales + space_string + float_number + space_string +
+                                          float_number + space_string + float_number + space_string + float_number +
+                                          space_string + float_number + end_string)
+
+        # Centre
+        self._centre = "\\s*CENTRE\\s*"
+        self._hab = "\\s*(HAB|FRONT)\\s*"
+        self._lab = "\\s*(LAB|REAR|MAIN)\\s*"
+        self._hab_or_lab = "\\s*((/" + self._hab + "|/" + self._lab + "))\\s*"
+        self._centre_pattern = re.compile(start_string + self._centre + "\\s*(" + self._hab_or_lab + space_string +
+                                          ")?\\s*" + float_number + space_string + float_number + end_string)
+
+    def parse_line(self, line):
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, SetParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting
+        if self._is_scales(setting):
+            output = self._extract_scales(setting)
+        elif self._is_centre(setting):
+            output = self._extract_centre(setting)
+        else:
+            raise RuntimeError("SetParser: Unknown command for SET: {0}".format(line))
+        return output
+
+    def _is_scales(self, line):
+        return does_pattern_match(self._scales_pattern, line)
+
+    def _is_centre(self, line):
+        return does_pattern_match(self._centre_pattern, line)
+
+    def _extract_scales(self, line):
+        scales_string = re.sub(self._scales, "", line)
+        scales = extract_float_list(scales_string, separator=" ")
+        if len(scales) != 5:
+            raise ValueError("SetParser: Expected 5 entries for the SCALES setting, but got {0}.".format(len(scales)))
+        return {SetId.scales: set_scales_entry(s=scales[0], a=scales[1], b=scales[2], c=scales[3], d=scales[4])}
+
+    def _extract_centre(self, line):
+        detector_type = DetectorType.HAB if re.search(self._hab, line) is not None else DetectorType.LAB
+        centre_string = re.sub(self._centre, "", line)
+        centre_string = re.sub(self._hab_or_lab, "", centre_string)
+        centre = extract_float_range(centre_string)
+        return {SetId.centre: position_entry(pos1=centre[0], pos2=centre[1], detector_type=detector_type)}
+
+    @staticmethod
+    def get_type():
+        return SetParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        return "\\s*" + SetParser.get_type() + "\\s+"
+
+
+class TransParser(UserFileComponentParser):
+    """
+    The TransParser handles the following structure for
+        TRANS/TRANSPEC=n
+        TRANS/SAMPLEWS=ws1
+        TRANS/CANWS=ws2
+        TRANS/TRANSPEC=4[/SHIFT=z]
+        TRANS/RADIUS=r
+        TRANS/ROI=roi_mask.xml
+        TRANS/MASK=mask.xml
+    """
+    Type = "TRANS"
+
+    def __init__(self):
+        super(TransParser, self).__init__()
+        # General
+        self._single_file = "[\\w]+(\\.XML)"
+        self._multiple_files = self._single_file + "(,\\s*" + self._single_file + ")*\\s*"
+        self._workspace = "[\\w]+"
+
+        # Trans Spec
+        self._trans_spec = "\\s*TRANSPEC\\s*=\\s*"
+        self._trans_spec_pattern = re.compile(start_string + self._trans_spec + integer_number +
+                                              end_string)
+
+        # Trans Spec Shift
+        self._shift = "\\s*/\\s*SHIFT\\s*=\\s*"
+        self._trans_spec_4 = self._trans_spec + "4"
+        self._trans_spec_shift_pattern = re.compile(start_string + self._trans_spec_4 + self._shift + float_number +
+                                                    end_string)
+
+        # Radius
+        self._radius = "\\s*RADIUS\\s*=\\s*"
+        self._radius_pattern = re.compile(start_string + self._radius + float_number)
+
+        # ROI
+        self._roi = "\\s*ROI\\s*=\\s*"
+        self._roi_pattern = re.compile(start_string + self._roi + self._multiple_files + end_string)
+
+        # Mask
+        self._mask = "\\s*MASK\\s*=\\s*"
+        self._mask_pattern = re.compile(start_string + self._mask + self._multiple_files + end_string)
+
+        # CanWS
+        self._can_workspace = "\\s*CANWS\\s*=\\s*"
+        self._can_workspace_pattern = re.compile(start_string + self._can_workspace + self._workspace +
+                                                 end_string)
+        # SampleWS
+        self._sample_workspace = "\\s*SAMPLEWS\\s*=\\s*"
+        self._sample_workspace_pattern = re.compile(start_string + self._sample_workspace + self._workspace +
+                                                    end_string)
+
+    def parse_line(self, line):
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, TransParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting
+        if self._is_trans_spec(setting):
+            output = self._extract_trans_spec(setting)
+        elif self._is_trans_spec_shift(setting):
+            output = self._extract_trans_spec_shift(setting)
+        elif self._is_radius(setting):
+            output = self._extract_radius(setting)
+        elif self._is_roi(setting):
+            # Note that we need the original line in order to extract the the case sensitive meaning
+            output = self._extract_roi(setting, line)
+        elif self._is_mask(setting):
+            # Note that we need the original line in order to extract the the case sensitive meaning
+            output = self._extract_mask(setting, line)
+        elif self._is_sample_workspace(setting):
+            # Note that we need the original line in order to extract the the case sensitive meaning
+            output = self._extract_sample_workspace(setting, line)
+        elif self._is_can_workspace(setting):
+            # Note that we need the original line in order to extract the the case sensitive meaning
+            output = self._extract_can_workspace(setting, line)
+        else:
+            raise RuntimeError("TransParser: Unknown command for TRANS: {0}".format(line))
+        return output
+
+    def _is_trans_spec(self, line):
+        return does_pattern_match(self._trans_spec_pattern, line)
+
+    def _is_trans_spec_shift(self, line):
+        return does_pattern_match(self._trans_spec_shift_pattern, line)
+
+    def _is_radius(self, line):
+        return does_pattern_match(self._radius_pattern, line)
+
+    def _is_roi(self, line):
+        return does_pattern_match(self._roi_pattern, line)
+
+    def _is_mask(self, line):
+        return does_pattern_match(self._mask_pattern, line)
+
+    def _is_sample_workspace(self, line):
+        return does_pattern_match(self._sample_workspace_pattern, line)
+
+    def _is_can_workspace(self, line):
+        return does_pattern_match(self._can_workspace_pattern, line)
+
+    def _extract_trans_spec(self, line):
+        trans_spec_string = re.sub(self._trans_spec, "", line)
+        trans_spec = convert_string_to_integer(trans_spec_string)
+        return {TransId.spec: trans_spec}
+
+    def _extract_trans_spec_shift(self, line):
+        trans_spec_shift_string = re.sub(self._trans_spec_4, "", line)
+        trans_spec_shift_string = re.sub(self._shift, "", trans_spec_shift_string)
+        trans_spec_shift = convert_string_to_float(trans_spec_shift_string)
+        return {TransId.spec_shift: trans_spec_shift, TransId.spec: 4}
+
+    def _extract_radius(self, line):
+        radius_string = re.sub(self._radius, "", line)
+        radius = convert_string_to_float(radius_string)
+        return {TransId.radius: radius}
+
+    def _extract_roi(self, line, original_line):
+        file_names = TransParser.extract_file_names(line, original_line, self._roi)
+        return {TransId.roi: file_names}
+
+    def _extract_mask(self, line, original_line):
+        file_names = TransParser.extract_file_names(line, original_line, self._mask)
+        return {TransId.mask: file_names}
+
+    def _extract_sample_workspace(self, line, original_line):
+        sample_workspace = TransParser.extract_workspace(line, original_line, self._sample_workspace)
+        return {TransId.sample_workspace:  sample_workspace}
+
+    def _extract_can_workspace(self, line, original_line):
+        can_workspace = TransParser.extract_workspace(line, original_line, self._can_workspace)
+        return {TransId.can_workspace:  can_workspace}
+
+    @staticmethod
+    def extract_workspace(line, original_line, to_remove):
+        element = re.sub(to_remove, "", line)
+        element = element.strip()
+        return re.search(element, original_line, re.IGNORECASE).group(0)
+
+    @staticmethod
+    def extract_file_names(line, original_line, to_remove):
+        elements_string = re.sub(to_remove, "", line)
+        elements = extract_string_list(elements_string)
+        return [re.search(element, original_line, re.IGNORECASE).group(0) for element in elements]
+
+    @staticmethod
+    def get_type():
+        return TransParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        return "\\s*" + TransParser.get_type() + "\\s*/\\s*"
+
+
+class TubeCalibFileParser(UserFileComponentParser):
+    """
+    The TubeCalibFileParser handles the following structure for
+        TUBECALIBFILE=calib_file.nxs
+    """
+    Type = "TUBECALIBFILE"
+
+    def __init__(self):
+        super(TubeCalibFileParser, self).__init__()
+
+        self._tube_calib_file = "\\s*[\\w]+(\\.NXS)\\s*"
+        self._tube_calib_file_pattern = re.compile(start_string + self._tube_calib_file + end_string)
+
+    def parse_line(self, line):
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, TubeCalibFileParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting
+        if self._is_tube_calib_file(setting):
+            output = self._extract_tube_calib_file(setting, line)
+        else:
+            raise RuntimeError("TubeCalibFileParser: Unknown command for TUBECALIBFILE: {0}".format(line))
+        return output
+
+    def _is_tube_calib_file(self, line):
+        return does_pattern_match(self._tube_calib_file_pattern, line)
+
+    @staticmethod
+    def _extract_tube_calib_file(line, original_line):
+        file_name_capital = line.strip()
+        file_name = re.search(file_name_capital, original_line, re.IGNORECASE).group(0)
+        return {TubeCalibrationFileId.file: file_name}
+
+    @staticmethod
+    def get_type():
+        return TubeCalibFileParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        return "\\s*" + TubeCalibFileParser.get_type() + "\\s*=\\s*"
+
+
+class QResolutionParser(UserFileComponentParser):
+    """
+    The QResolutionParser handles the following structure for
+        QRESOL/ON
+        QRESOL/OFF
+        QRESOL/DELTAR=dr
+        QRESOL/LCOLLIM="lcollim"
+        QRESOL/MODERATOR=moderator_rkh_file.txt
+        QRESOL/A1="a1"
+        QRESOL/A2="a2"
+        QRESOL/H1="h1"
+        QRESOL/H2="h2"
+        QRESOL/W1="w1"
+        QRESOL/W2="w2"
+    """
+    Type = "QRESOL"
+
+    def __init__(self):
+        super(QResolutionParser, self).__init__()
+
+        # On Off
+        self._on = "\\s*ON\\s*"
+        self._off = "\\s*OFF\\s*"
+        self._on_or_off = "\\s*(" + self._on + "|" + self._off + ")\\s*"
+        self._on_or_off_pattern = re.compile(start_string + self._on_or_off + end_string)
+
+        # Delta R
+        self._delta_r = "\\s*DELTAR\\s*=\\s*"
+        self._delta_r_pattern = re.compile(start_string + self._delta_r + float_number + end_string)
+
+        # Collimation Length
+        self._collimation_length = "\\s*LCOLLIM\\s*=\\s*"
+        self._collimation_length_pattern = re.compile(start_string + self._collimation_length +
+                                                      float_number + end_string)
+
+        # A1
+        self._a1 = "\\s*A1\\s*=\\s*"
+        self._a1_pattern = re.compile(start_string + self._a1 + float_number + end_string)
+
+        # A2
+        self._a2 = "\\s*A2\\s*=\\s*"
+        self._a2_pattern = re.compile(start_string + self._a2 + float_number + end_string)
+
+        # H1
+        self._h1 = "\\s*H1\\s*=\\s*"
+        self._h1_pattern = re.compile(start_string + self._h1 + float_number + end_string)
+
+        # H2
+        self._h2 = "\\s*H2\\s*=\\s*"
+        self._h2_pattern = re.compile(start_string + self._h2 + float_number + end_string)
+
+        # W1
+        self._w1 = "\\s*W1\\s*=\\s*"
+        self._w1_pattern = re.compile(start_string + self._w1 + float_number + end_string)
+
+        # W2
+        self._w2 = "\\s*W2\\s*=\\s*"
+        self._w2_pattern = re.compile(start_string + self._w2 + float_number + end_string)
+
+        # Moderator
+        self._moderator = "\\s*MODERATOR\\s*=\\s*"
+        self._file = "[\\w]+(\\.TXT)"
+        self._moderator_pattern = re.compile(start_string + self._moderator + self._file)
+
+    def parse_line(self, line):
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, QResolutionParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting
+        if self._is_on_off(setting):
+            output = self._extract_on_off(setting)
+        elif self._is_delta_r(setting):
+            output = self._extract_delta_r(setting)
+        elif self._is_a1(setting):
+            output = self._extract_a1(setting)
+        elif self._is_a2(setting):
+            output = self._extract_a2(setting)
+        elif self._is_h1(setting):
+            output = self._extract_h1(setting)
+        elif self._is_w1(setting):
+            output = self._extract_w1(setting)
+        elif self._is_h2(setting):
+            output = self._extract_h2(setting)
+        elif self._is_w2(setting):
+            output = self._extract_w2(setting)
+        elif self._is_collimation_length(setting):
+            output = self._extract_collimation_length(setting)
+        elif self._is_moderator(setting):
+            output = self._extract_moderator(setting, line)
+        else:
+            raise RuntimeError("QResolutionParser: Unknown command for QRESOLUTION: {0}".format(line))
+        return output
+
+    def _is_on_off(self, line):
+        return does_pattern_match(self._on_or_off_pattern, line)
+
+    def _is_delta_r(self, line):
+        return does_pattern_match(self._delta_r_pattern, line)
+
+    def _is_a1(self, line):
+        return does_pattern_match(self._a1_pattern, line)
+
+    def _is_a2(self, line):
+        return does_pattern_match(self._a2_pattern, line)
+
+    def _is_h1(self, line):
+        return does_pattern_match(self._h1_pattern, line)
+
+    def _is_w1(self, line):
+        return does_pattern_match(self._w1_pattern, line)
+
+    def _is_h2(self, line):
+        return does_pattern_match(self._h2_pattern, line)
+
+    def _is_w2(self, line):
+        return does_pattern_match(self._w2_pattern, line)
+
+    def _is_collimation_length(self, line):
+        return does_pattern_match(self._collimation_length_pattern, line)
+
+    def _is_moderator(self, line):
+        return does_pattern_match(self._moderator_pattern, line)
+
+    def _extract_on_off(self, line):
+        value = False if re.search(self._off, line) is not None else True
+        return {QResolutionId.on: value}
+
+    def _extract_delta_r(self, line):
+        return {QResolutionId.delta_r: QResolutionParser.extract_float(line, self._delta_r)}
+
+    def _extract_collimation_length(self, line):
+        return {QResolutionId.collimation_length: QResolutionParser.extract_float(line, self._collimation_length)}
+
+    def _extract_a1(self, line):
+        return {QResolutionId.a1: QResolutionParser.extract_float(line, self._a1)}
+
+    def _extract_a2(self, line):
+        return {QResolutionId.a2: QResolutionParser.extract_float(line, self._a2)}
+
+    def _extract_h1(self, line):
+        return {QResolutionId.h1: QResolutionParser.extract_float(line, self._h1)}
+
+    def _extract_w1(self, line):
+        return {QResolutionId.w1: QResolutionParser.extract_float(line, self._w1)}
+
+    def _extract_h2(self, line):
+        return {QResolutionId.h2: QResolutionParser.extract_float(line, self._h2)}
+
+    def _extract_w2(self, line):
+        return {QResolutionId.w2: QResolutionParser.extract_float(line, self._w2)}
+
+    def _extract_moderator(self, line, original_line):
+        moderator_capital = re.sub(self._moderator, "", line)
+        moderator = re.search(moderator_capital, original_line, re.IGNORECASE).group(0)
+        return {QResolutionId.moderator: moderator}
+
+    @staticmethod
+    def extract_float(line, to_remove):
+        value_string = re.sub(to_remove, "", line)
+        return convert_string_to_float(value_string)
+
+    @staticmethod
+    def get_type():
+        return QResolutionParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        return "\\s*" + QResolutionParser.get_type() + "\\s*/\\s*"
+
+
+class FitParser(UserFileComponentParser):
+    """
+    The FitParser handles the following structure for
+        FIT/TRANS/CLEAR  or  FIT/TRANS/OFF
+        FIT/TRANS/LIN [w1 w2]  or  FIT/TRANS/LINEAR [w1 w2]  or  FIT/TRANS/STRAIGHT [w1 w2]
+        FIT/TRANS/LOG [w1 w2]  or  FIT/TRANS/YLOG [w1 w2]
+        FIT/MONITOR time1 time2
+        FIT/TRANS/[CAN/|SAMPLE/][LIN|LOG|POLYNOMIAL[2|3|4|5]] [w1 w2]
+    """
+    Type = "FIT"
+    sample = "SAMPLE"
+    can = "CAN"
+    both = "BOTH"
+
+    def __init__(self):
+        super(FitParser, self).__init__()
+
+        # General
+        self._trans_prefix = "\\s*TRANS\\s*/\\s*"
+
+        # Clear: TRANS/OFF or TRANS/CLEAR switches the fit off
+        trans_off_or_clear = self._trans_prefix + "(OFF|CLEAR)\\s*"
+        self._trans_clear_pattern = re.compile(start_string + trans_off_or_clear + end_string)
+
+        # General fits: optional CAN/ or SAMPLE/ scope, a fit type, optional wavelengths
+        self._sample = "\\s*SAMPLE\\s*/\\s*"
+        self._can = "\\s*CAN\\s*/\\s*"
+        self._can_or_sample = "\\s*(" + self._can + "|" + self._sample + ")"
+        self._optional_can_or_sample = "\\s*(" + self._can_or_sample + ")?"
+
+        self._lin = "\\s*(LINEAR|LIN|STRAIGHT)\\s*"
+        self._log = "\\s*(YLOG|LOG)\\s*"
+        self._polynomial = "\\s*POLYNOMIAL\\s*"
+        self._polynomial_with_optional_order = self._polynomial + "(2|3|4|5)?\\s*"
+        self._lin_or_log_or_poly = "\\s*(" + self._lin + "|" + self._log + "|" +\
+                                   self._polynomial_with_optional_order + ")\\s*"
+        # Variant without the order digits: used when stripping the keyword so that
+        # a trailing polynomial order survives for later parsing.
+        self._lin_or_log_or_poly_to_remove = "\\s*(" + self._lin + "|" + self._log + "|" + self._polynomial + ")\\s*"
+        self._wavelength_optional = "\\s*(" + float_number + space_string + float_number + ")?\\s*"
+
+        self._general_fit_pattern = re.compile(start_string + self._trans_prefix + self._optional_can_or_sample +
+                                               self._lin_or_log_or_poly + self._wavelength_optional + end_string)
+
+        # Monitor times: FIT/MONITOR followed by two floats
+        self._monitor = "\\s*MONITOR\\s*"
+        self._monitor_pattern = re.compile(start_string + self._monitor + space_string + float_number + space_string +
+                                           float_number + end_string)
+
+    def parse_line(self, line):
+        """Parse a single FIT line and return the extracted setting dict."""
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, FitParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting
+        if self._is_clear(setting):
+            output = FitParser.extract_clear()
+        elif self._is_monitor(setting):
+            output = self._extract_monitor(setting)
+        elif self._is_general_fit(setting):
+            output = self._extract_general_fit(setting)
+        else:
+            raise RuntimeError("FitParser: Unknown command for FIT: {0}".format(line))
+        return output
+
+    def _is_clear(self, line):
+        # True if the setting is a TRANS/OFF or TRANS/CLEAR entry.
+        return does_pattern_match(self._trans_clear_pattern, line)
+
+    def _is_monitor(self, line):
+        # True if the setting is a MONITOR time-range entry.
+        return does_pattern_match(self._monitor_pattern, line)
+
+    def _is_general_fit(self, line):
+        # True if the setting is a general TRANS fit specification.
+        return does_pattern_match(self._general_fit_pattern, line)
+
+    def _extract_monitor(self, line):
+        values_string = re.sub(self._monitor, "", line)
+        values = extract_float_range(values_string)
+        return {FitId.monitor_times: range_entry(start=values[0], stop=values[1])}
+
+    def _extract_general_fit(self, line):
+        # Combine fit type, workspace scope, wavelength bounds and polynomial
+        # order into a single fit_general record.
+        fit_type = self._get_fit_type(line)
+        ws_type = self._get_workspace_type(line)
+        wavelength_min, wavelength_max = self._get_wavelength(line)
+        polynomial_order = self._get_polynomial_order(fit_type, line)
+        return {FitId.general: fit_general(start=wavelength_min, stop=wavelength_max, fit_type=fit_type,
+                                           data_type=ws_type, polynomial_order=polynomial_order)}
+
+    def _get_wavelength(self, line):
+        # Only the wavelength boundaries are of interest here; the polynomial
+        # order (first element) is discarded.
+        _, wavelength_min, wavelength_max = self._get_wavelength_and_polynomial(line)
+        return wavelength_min, wavelength_max
+
+    def _get_polynomial_order(self, fit_type, line):
+        # Non-polynomial fits carry an order of 0; a polynomial fit without an
+        # explicit order defaults to 2.
+        if fit_type != FitType.Polynomial:
+            poly_order = 0
+        else:
+            poly_order, _, _ = self._get_wavelength_and_polynomial(line)
+        return 2 if poly_order is None else poly_order
+
+    def _get_wavelength_and_polynomial(self, line):
+        """Strip the known keywords and interpret the remaining numbers.
+
+        Returns a (polynomial_order, wavelength_min, wavelength_max) triple;
+        any element that was not specified on the line is None.
+        Raises RuntimeError if more than three numbers remain.
+        """
+        fit_string = re.sub(self._trans_prefix, "", line)
+        fit_string = re.sub(self._can_or_sample, "", fit_string)
+        fit_string = re.sub(self._lin_or_log_or_poly_to_remove, "", fit_string)
+
+        # We should now have something like [poly_order] [w1 w2]
+        # There are four possibilities
+        # 1. There is no number
+        # 2. There is one number -> it has to be the poly_order
+        # 3. There are two numbers -> it has to be the w1 and w2
+        # 4. There are three numbers -> it has to be poly_order w1 and w2
+        fit_string = ' '.join(fit_string.split())
+        fit_string_array = fit_string.split()
+        length_array = len(fit_string_array)
+        if length_array == 0:
+            polynomial_order = None
+            wavelength_min = None
+            wavelength_max = None
+        elif length_array == 1:
+            polynomial_order = convert_string_to_integer(fit_string_array[0])
+            wavelength_min = None
+            wavelength_max = None
+        elif length_array == 2:
+            polynomial_order = None
+            wavelength_min = convert_string_to_float(fit_string_array[0])
+            wavelength_max = convert_string_to_float(fit_string_array[1])
+        elif length_array == 3:
+            polynomial_order = convert_string_to_integer(fit_string_array[0])
+            wavelength_min = convert_string_to_float(fit_string_array[1])
+            wavelength_max = convert_string_to_float(fit_string_array[2])
+        else:
+            raise RuntimeError("FitParser: Incorrect number of fit entries: {0}".format(line))
+
+        return polynomial_order, wavelength_min, wavelength_max
+
+    def _get_fit_type(self, line):
+        # Map the keyword on the line to a FitType. LOG is tested before LIN;
+        # the patterns themselves are defined in __init__ (not shown here).
+        if re.search(self._log, line) is not None:
+            fit_type = FitType.Log
+        elif re.search(self._lin, line) is not None:
+            fit_type = FitType.Linear
+        elif re.search(self._polynomial, line) is not None:
+            fit_type = FitType.Polynomial
+        else:
+            raise RuntimeError("FitParser: Encountered unknown fit function: {0}".format(line))
+        return fit_type
+
+    def _get_workspace_type(self, line):
+        # Returns DataType.Sample or DataType.Can; None when neither SAMPLE
+        # nor CAN was specified on the line.
+        if re.search(self._sample, line) is not None:
+            ws_type = DataType.Sample
+        elif re.search(self._can, line) is not None:
+            ws_type = DataType.Can
+        else:
+            ws_type = None
+        return ws_type
+
+    @staticmethod
+    def extract_clear():
+        # A CLEAR command carries no arguments; flag it with a bare True.
+        return {FitId.clear: True}
+
+    @staticmethod
+    def get_type():
+        # The user-file keyword this parser is responsible for ("FIT").
+        return FitParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        # Regex that recognises "FIT/" at the start of a line.
+        # NOTE(review): @abc.abstractmethod on a concrete staticmethod looks
+        # odd; presumably it overrides an abstract member declared on
+        # UserFileComponentParser -- confirm against the base class.
+        return "\\s*" + FitParser.get_type() + "\\s*/\\s*"
+
+
+class GravityParser(UserFileComponentParser):
+    """
+    The GravityParser handles the following structure for
+        GRAVITY ON
+        GRAVITY OFF
+        GRAVITY/LEXTRA=l1 or (non-standard) GRAVITY/LEXTRA l1
+    """
+    Type = "GRAVITY"
+
+    def __init__(self):
+        super(GravityParser, self).__init__()
+
+        # On Off
+        self._on = "ON"
+        self._on_off = "\\s*(OFF|" + self._on + ")"
+        self._on_off_pattern = re.compile(start_string + self._on_off + end_string)
+
+        # Extra length: "LEXTRA=l1" with the "=" optionally replaced by space
+        self._extra_length = "\\s*LEXTRA\\s*(=|\\s)?\\s*"
+        self._extra_length_pattern = re.compile(start_string + self._extra_length + float_number + end_string)
+
+    def parse_line(self, line):
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, GravityParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting
+        if self._is_on_off(setting):
+            output = self._extract_on_off(setting)
+        elif self._is_extra_length(setting):
+            output = self._extract_extra_length(setting)
+        else:
+            raise RuntimeError("GravityParser: Unknown command for GRAVITY: {0}".format(line))
+        return output
+
+    def _is_on_off(self, line):
+        # True if the setting is a plain ON or OFF switch.
+        return does_pattern_match(self._on_off_pattern, line)
+
+    def _is_extra_length(self, line):
+        # True if the setting is an LEXTRA extra-length specification.
+        return does_pattern_match(self._extra_length_pattern, line)
+
+    def _extract_on_off(self, line):
+        # After removing "ON" nothing is left for an ON command, so the
+        # stripped remainder being empty means gravity is enabled; an OFF
+        # line leaves "OFF" behind and yields False.
+        value = re.sub(self._on, "", line).strip() == ""
+        return {GravityId.on_off: value}
+
+    def _extract_extra_length(self, line):
+        # Drop the LEXTRA keyword (and optional "=") and parse the length.
+        extra_length_string = re.sub(self._extra_length, "", line)
+        extra_length = convert_string_to_float(extra_length_string)
+        return {GravityId.extra_length: extra_length}
+
+    @staticmethod
+    def get_type():
+        # The user-file keyword this parser is responsible for ("GRAVITY").
+        return GravityParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        # GRAVITY may be followed by either "/" (LEXTRA) or whitespace (ON/OFF).
+        return "\\s*" + GravityParser.get_type() + "(\\s*/\\s*|\\s+)"
+
+
+class MaskFileParser(UserFileComponentParser):
+    """
+    The MaskFileParser handles the following structure for
+        MASKFILE=mask1.xml,mask2.xml,...
+    """
+    Type = "MASKFILE"
+
+    def __init__(self):
+        super(MaskFileParser, self).__init__()
+
+        # MaskFile: one or more comma-separated *.xml file names
+        self._single_file = "[\\w]+(\\.XML)"
+        self._multiple_files = self._single_file + "(,\\s*" + self._single_file + ")*\\s*"
+        self._mask_file_pattern = re.compile(start_string + "\\s*" + self._multiple_files + end_string)
+
+    def parse_line(self, line):
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, MaskFileParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting
+        if self._is_mask_file(setting):
+            output = MaskFileParser.extract_mask_file(setting, line)
+        else:
+            raise RuntimeError("MaskFileParser: Unknown command for MASKFILE: {0}".format(line))
+        return output
+
+    def _is_mask_file(self, line):
+        # True if the setting is a comma-separated list of XML mask files.
+        return does_pattern_match(self._mask_file_pattern, line)
+
+    @staticmethod
+    def extract_mask_file(line, original_line):
+        # The setting is upper-cased; search each name back in the original
+        # line (case-insensitively) to recover the file names as typed.
+        # NOTE(review): the names are used as regex patterns here, so regex
+        # metacharacters in a file name could misbehave -- confirm acceptable.
+        elements_capital = extract_string_list(line)
+        elements = [re.search(element, original_line, re.IGNORECASE).group(0) for element in elements_capital]
+        return {MaskId.file: elements}
+
+    @staticmethod
+    def get_type():
+        # The user-file keyword this parser is responsible for ("MASKFILE").
+        return MaskFileParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        # MASKFILE is always followed by "=".
+        return "\\s*" + MaskFileParser.get_type() + "(\\s*=\\s*)"
+
+
+class MonParser(UserFileComponentParser):
+    """
+    The MonParser handles the following structure for
+        MON/DIRECT[/FRONT]=file  or  MON/DIRECT[/REAR]=file
+        MON/FLAT[/FRONT]=file  or  MON/FLAT[/REAR]=file
+        MON/HAB=file
+
+        MON/LENGTH=z sp [/INTERPOLATE]
+        MON[/TRANS]/SPECTRUM=sp [/INTERPOLATE]
+    """
+    Type = "MON"
+
+    def __init__(self):
+        super(MonParser, self).__init__()
+
+        # General detector-bank aliases.
+        # NOTE(review): alternation binds loosely here, i.e. this reads
+        # (\s*HAB)|(FRONT\s*); the composite patterns below wrap it in a
+        # group, and the stand-alone re.search uses are substring matches,
+        # so behaviour appears intended -- but confirm.
+        self._hab = "\\s*HAB|FRONT\\s*"
+        self._lab = "\\s*LAB|REAR|MAIN\\s*"
+        self._detector = "\\s*/\\s*(" + self._hab + "|" + self._lab + ")\\s*"
+        self._optional_detector = "\\s*(" + self._detector + ")?\\s*"
+        self._equal = "\\s*=\\s*"
+
+        # A file path: anything without whitespace that has an extension.
+        self._file_path = "\\s*[^\\s]*\\.[\\w]+\\s*"
+
+        # Length
+        self._length = "\\s*LENGTH\\s*=\\s*"
+        self._interpolate = "\\s*/\\s*INTERPOLATE\\s*"
+        self._length_pattern = re.compile(start_string + self._length + float_number + space_string + integer_number +
+                                          "(\\s*" + self._interpolate + "\\s*)?" + end_string)
+
+        # Direct
+        self._direct = "\\s*DIRECT\\s*"
+        self._direct_pattern = re.compile(start_string + self._direct + self._optional_detector +
+                                          self._equal + self._file_path + end_string)
+
+        # Flat
+        self._flat = "\\s*FLAT\\s*"
+        self._flat_pattern = re.compile(start_string + self._flat + self._optional_detector +
+                                        self._equal + self._file_path + end_string)
+
+        # Hab
+        self._hab_file = "\\s*HAB\\s*"
+        self._hab_pattern = re.compile(start_string + self._hab_file + self._optional_detector +
+                                       self._equal + self._file_path + end_string)
+
+        # Spectrum
+        self._spectrum = "\\s*SPECTRUM\\s*"
+        self._trans = "\\s*TRANS\\s*"
+        self._spectrum_pattern = re.compile(start_string + "(\\s*" + self._trans + "\\s*/\\s*)?" + self._spectrum +
+                                            self._equal + integer_number + "(\\s*" + self._interpolate + "\\s*)?" +
+                                            end_string)
+
+    def parse_line(self, line):
+        # Escape regex special characters first so that file paths can later
+        # be searched for verbatim in the line.
+        line = escape_special_characters_for_file_path(line)
+        setting = UserFileComponentParser.get_settings(line, MonParser.get_type_pattern())
+
+        # Determine the qualifier and extract the user setting.
+        # Order matters: DIRECT/FLAT are tested before the bare HAB form.
+        if self._is_length(setting):
+            output = self._extract_length(setting)
+        elif self._is_direct(setting):
+            output = self._extract_direct(setting, line)
+        elif self._is_flat(setting):
+            output = self._extract_flat(setting, line)
+        elif self._is_hab(setting):
+            output = self._extract_hab(setting, line)
+        elif self._is_spectrum(setting):
+            output = self._extract_spectrum(setting)
+        else:
+            raise RuntimeError("MonParser: Unknown command for MON: {0}".format(line))
+        return output
+
+    def _is_length(self, line):
+        # True for "LENGTH=z sp [/INTERPOLATE]".
+        return does_pattern_match(self._length_pattern, line)
+
+    def _is_direct(self, line):
+        # True for "DIRECT[/detector]=file".
+        return does_pattern_match(self._direct_pattern, line)
+
+    def _is_flat(self, line):
+        # True for "FLAT[/detector]=file".
+        return does_pattern_match(self._flat_pattern, line)
+
+    def _is_hab(self, line):
+        # True for "HAB=file".
+        return does_pattern_match(self._hab_pattern, line)
+
+    def _is_spectrum(self, line):
+        # True for "[TRANS/]SPECTRUM=sp [/INTERPOLATE]".
+        return does_pattern_match(self._spectrum_pattern, line)
+
+    def _extract_length(self, line):
+        # Peel off the optional /INTERPOLATE flag before parsing "z sp".
+        if re.search(self._interpolate, line) is not None:
+            interpolate = True
+            line = re.sub(self._interpolate, "", line)
+        else:
+            interpolate = False
+        length_string = re.sub(self._length, "", line)
+        length_entries = extract_float_list(length_string, separator=" ")
+        if len(length_entries) != 2:
+            raise RuntimeError("MonParser: Length setting needs 2 numeric parameters, "
+                               "but received {0}.".format(len(length_entries)))
+        return {MonId.length: monitor_length(length=length_entries[0], spectrum=length_entries[1],
+                                             interpolate=interpolate)}
+
+    def _extract_direct(self, line, original_line):
+        # If we have a HAB specified then select HAB
+        # If we have LAB specified then select LAB
+        # If nothing is specified then select BOTH
+        is_hab = re.search(self._hab, line, re.IGNORECASE)
+        is_lab = re.search(self._lab, line, re.IGNORECASE)
+
+        if not is_hab and not is_lab:
+            is_hab = True
+            is_lab = True
+
+        file_path = self._extract_file_path(line, original_line, self._direct)
+        # One monitor_file entry per selected detector bank.
+        output = []
+        if is_hab:
+            output.append(monitor_file(file_path=file_path, detector_type=DetectorType.HAB))
+        if is_lab:
+            output.append(monitor_file(file_path=file_path, detector_type=DetectorType.LAB))
+        return {MonId.direct: output}
+
+    def _extract_flat(self, line, original_line):
+        # If we have a HAB specified then select HAB
+        # If we have LAB specified then select LAB
+        # If nothing is specified then select LAB
+        detector_type = DetectorType.HAB if re.search(self._hab, line, re.IGNORECASE) else DetectorType.LAB
+        file_path = self._extract_file_path(line, original_line, self._flat)
+        return {MonId.flat: monitor_file(file_path=file_path, detector_type=detector_type)}
+
+    def _extract_hab(self, line, original_line):
+        # MON/HAB=file carries only the file path, no detector qualifier.
+        file_path = self._extract_file_path(line, original_line, self._hab_file)
+        return {MonId.hab: file_path}
+
+    def _extract_file_path(self, line, original_line, to_remove):
+        # Strip detector qualifier, leading keyword and "=", leaving only the
+        # (upper-cased) path; then search the original line case-insensitively
+        # to recover the path as typed. parse_line escaped regex special
+        # characters in the line beforehand, so the path is safe as a pattern.
+        direct = re.sub(self._detector, "", line)
+        # Remove only the first occurrence
+        direct = re.sub(to_remove, "", direct, count=1)
+        direct = re.sub(self._equal, "", direct)
+        direct = direct.strip()
+        return re.search(direct, original_line, re.IGNORECASE).group(0)
+
+    def _extract_spectrum(self, line):
+        # Peel off the optional /INTERPOLATE flag first.
+        if re.search(self._interpolate, line) is not None:
+            interpolate = True
+            line = re.sub(self._interpolate, "", line)
+        else:
+            interpolate = False
+
+        # An optional leading TRANS/ marks this as a transmission spectrum.
+        if re.search(self._trans, line) is not None:
+            is_trans = True
+            line = re.sub(self._trans, "", line)
+            line = re.sub("/", "", line)
+        else:
+            is_trans = False
+
+        line = re.sub(self._spectrum, "", line)
+        line = re.sub(self._equal, "", line)
+        spectrum = convert_string_to_integer(line)
+        return {MonId.spectrum: monitor_spectrum(spectrum=spectrum, is_trans=is_trans, interpolate=interpolate)}
+
+    @staticmethod
+    def get_type():
+        # The user-file keyword this parser is responsible for ("MON").
+        return MonParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        # MON is always followed by "/".
+        return "\\s*" + MonParser.get_type() + "(\\s*/\\s*)"
+
+
+class PrintParser(UserFileComponentParser):
+    """
+    The PrintParser handles the following structure for
+        PRINT string
+    """
+    Type = "PRINT"
+
+    def __init__(self):
+        super(PrintParser, self).__init__()
+
+        # Print: the keyword followed by arbitrary text
+        self._print = "\\s*PRINT\\s+"
+        self._print_pattern = re.compile(start_string + self._print + "\\s*.*\\s*" + end_string)
+
+    def parse_line(self, line):
+        # Get the settings, ie remove command
+        setting = UserFileComponentParser.get_settings(line, PrintParser.get_type_pattern())
+
+        # Search the original line case-insensitively to recover the printed
+        # text with its original case (the setting itself is upper-cased).
+        # NOTE(review): the setting is used as a regex pattern here; regex
+        # metacharacters in the printed text could break the lookup -- confirm.
+        original_setting = re.search(setting.strip(), line, re.IGNORECASE).group(0)
+        return {PrintId.print_line: original_setting}
+
+    @staticmethod
+    def get_type():
+        # The user-file keyword this parser is responsible for ("PRINT").
+        return PrintParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        # PRINT is separated from its text by whitespace, not "/" or "=".
+        return "\\s*" + PrintParser.get_type() + "(\\s+)"
+
+
+class SANS2DParser(UserFileComponentParser):
+    """
+    The SANS2D is a hollow parser to ensure backwards compatibility
+    """
+    Type = "SANS2D"
+
+    def __init__(self):
+        super(SANS2DParser, self).__init__()
+
+    def parse_line(self, line):
+        # SANS2D lines are accepted but carry no settings.
+        return {}
+
+    @staticmethod
+    def get_type():
+        # The user-file keyword this parser is responsible for ("SANS2D").
+        return SANS2DParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        return "\\s*" + SANS2DParser.get_type() + "(\\s*)"
+
+
+class LOQParser(UserFileComponentParser):
+    """
+    The LOQParser is a hollow parser to ensure backwards compatibility
+    """
+    Type = "LOQ"
+
+    def __init__(self):
+        super(LOQParser, self).__init__()
+
+    def parse_line(self, line):
+        # LOQ lines are accepted but carry no settings.
+        return {}
+
+    @staticmethod
+    def get_type():
+        # The user-file keyword this parser is responsible for ("LOQ").
+        return LOQParser.Type
+
+    @staticmethod
+    @abc.abstractmethod
+    def get_type_pattern():
+        return "\\s*" + LOQParser.get_type() + "(\\s*)"
+
+
+class UserFileParser(object):
+    # Facade over all component parsers: picks the parser whose type pattern
+    # matches the start of a line and delegates the parsing to it.
+    def __init__(self):
+        super(UserFileParser, self).__init__()
+        self._parsers = {BackParser.get_type(): BackParser(),
+                         DetParser.get_type(): DetParser(),
+                         LimitParser.get_type(): LimitParser(),
+                         MaskParser.get_type(): MaskParser(),
+                         SampleParser.get_type(): SampleParser(),
+                         SetParser.get_type(): SetParser(),
+                         TransParser.get_type(): TransParser(),
+                         TubeCalibFileParser.get_type(): TubeCalibFileParser(),
+                         QResolutionParser.get_type(): QResolutionParser(),
+                         FitParser.get_type(): FitParser(),
+                         GravityParser.get_type(): GravityParser(),
+                         MaskFileParser.get_type(): MaskFileParser(),
+                         MonParser.get_type(): MonParser(),
+                         PrintParser.get_type(): PrintParser(),
+                         SANS2DParser.get_type(): SANS2DParser(),
+                         LOQParser.get_type(): LOQParser()}
+
+    def _get_correct_parser(self, line):
+        # The line is upper-cased for matching (re.IGNORECASE makes this
+        # belt-and-braces); the original-case line is parsed elsewhere.
+        line = line.strip()
+        line = line.upper()
+        for key in self._parsers:
+            parser = self._parsers[key]
+            if re.match(parser.get_type_pattern(), line, re.IGNORECASE) is not None:
+                return parser
+            else:
+                continue
+        # We have encountered an unknown file specifier.
+        raise ValueError("UserFileParser: Unknown user "
+                         "file command: {0}".format(line))
+
+    def parse_line(self, line):
+        """Parse a single user-file line into a dict of parsed settings.
+
+        Empty lines and "!" comments yield an empty dict. Raises ValueError
+        for lines no registered parser recognises.
+        """
+        # Clean the line of trailing white space
+        line = line.strip()
+
+        # If the line is empty, then ignore it
+        if not line:
+            return {}
+
+        # If the entry is a comment, then ignore it
+        if line.startswith("!"):
+            return {}
+
+        # Get the appropriate parser
+        parser = self._get_correct_parser(line)
+
+        # Parse the line and return the result
+        return parser.parse_line(line)
diff --git a/scripts/SANS/sans/user_file/user_file_reader.py b/scripts/SANS/sans/user_file/user_file_reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..8848ef4bfa459e8490edfa7d4a9cbd32f435546e
--- /dev/null
+++ b/scripts/SANS/sans/user_file/user_file_reader.py
@@ -0,0 +1,42 @@
+from sans.common.file_information import find_full_file_path
+from sans.user_file.user_file_parser import UserFileParser
+
+
+class UserFileReader(object):
+    # Reads a SANS user file line by line and accumulates the parsed
+    # settings into a single dict of {tag: list-of-values}.
+    def __init__(self, user_file):
+        super(UserFileReader, self).__init__()
+        # Resolve the file name against the Mantid search directories.
+        self._user_file = find_full_file_path(user_file)
+
+    @staticmethod
+    def _add_to_output(output, parsed):
+        # If the parsed values already exist in the output dict, then we extend the output
+        # else we just add it to the output dict. We have to be careful if we are dealing with a sequence. The scenarios
+        # are:
+        # 1. Exists and is standard value => add it to the existing list
+        # 2. Exists and is a sequence type => extend the existing list
+        # 3. Does not exist and is a standard value => create a list with that value and add it
+        # 4. Does not exist and is a sequence type => add the list itself
+        for key, value in list(parsed.items()):
+            is_list = isinstance(value, list)
+            is_key_in_output = key in output
+            if is_key_in_output and is_list:
+                output[key].extend(value)
+            elif is_key_in_output and not is_list:
+                output[key].append(value)
+            elif not is_key_in_output and is_list:
+                output[key] = value
+            else:
+                output[key] = [value]
+
+    def read_user_file(self):
+        """Parse the whole user file and return {tag: [values...]}."""
+        # Read in all elements
+        parser = UserFileParser()
+
+        output = {}
+        # The context manager guarantees the file handle is closed.
+        with open(self._user_file) as f:
+            for line in f:
+                parsed = parser.parse_line(line)
+                UserFileReader._add_to_output(output, parsed)
+
+        # Provide the read elements
+        return output
diff --git a/scripts/SANS/sans/user_file/user_file_state_director.py b/scripts/SANS/sans/user_file/user_file_state_director.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbd14aaab5cc51995073398e889a5a19b1e85800
--- /dev/null
+++ b/scripts/SANS/sans/user_file/user_file_state_director.py
@@ -0,0 +1,1243 @@
+from mantid.kernel import logger
+
+from sans.common.enums import (DetectorType, FitModeForMerge, RebinType, DataType)
+from sans.common.file_information import find_full_file_path
+from sans.common.general_functions import (get_ranges_for_rebin_setting, get_ranges_for_rebin_array,
+                                           get_ranges_from_event_slice_setting)
+from sans.user_file.user_file_reader import UserFileReader
+from sans.user_file.user_file_common import (DetectorId, BackId, LimitsId, TransId, TubeCalibrationFileId,
+                                             QResolutionId, MaskId, SampleId, SetId, MonId, FitId, GravityId, OtherId,
+                                             simple_range, complex_range, rebin_string_values)
+
+from sans.state.automatic_setters import set_up_setter_forwarding_from_director_to_builder
+from sans.state.state import get_state_builder
+from sans.state.mask import get_mask_builder
+from sans.state.move import get_move_builder
+from sans.state.reduction_mode import get_reduction_mode_builder
+from sans.state.slice_event import get_slice_event_builder
+from sans.state.wavelength import get_wavelength_builder
+from sans.state.save import get_save_builder
+from sans.state.scale import get_scale_builder
+from sans.state.adjustment import get_adjustment_builder
+from sans.state.normalize_to_monitor import get_normalize_to_monitor_builder
+from sans.state.calculate_transmission import get_calculate_transmission_builder
+from sans.state.wavelength_and_pixel_adjustment import get_wavelength_and_pixel_adjustment_builder
+from sans.state.convert_to_q import get_convert_to_q_builder
+from sans.state.compatibility import get_compatibility_builder
+import collections
+
+
+def check_if_contains_only_one_element(to_check, element_name):
+    # Warn (via the Mantid logger) when a setting that should be unique was
+    # specified several times; the caller then uses the last entry.
+    if len(to_check) > 1:
+        msg = "The element {0} contains more than one element. Expected only one element. " \
+              "The last element {1} is used. The elements {2} are discarded.".format(element_name,
+                                                                                     to_check[-1], to_check[:-1])
+        logger.notice(msg)
+
+
+def log_non_existing_field(field):
+    # Notify (not raise) when a user-file field has no counterpart on the state.
+    msg = "The field {0} does not seem to exist on the state.".format(field)
+    logger.notice(msg)
+
+
+def convert_detector(detector_type):
+    # Map a DetectorType enum member to its string form; only HAB and LAB
+    # are legal here, anything else is a hard error.
+    if detector_type is DetectorType.HAB:
+        detector_type_as_string = DetectorType.to_string(DetectorType.HAB)
+    elif detector_type is DetectorType.LAB:
+        detector_type_as_string = DetectorType.to_string(DetectorType.LAB)
+    else:
+        raise RuntimeError("UserFileStateDirector: Cannot convert detector {0}".format(detector_type))
+    return detector_type_as_string
+
+
+def get_min_q_boundary(min_q1, min_q2):
+    # Combine two lower q boundaries: take whichever is set, or the more
+    # restrictive (larger) one when both are set.
+    # NOTE(review): truthiness tests treat a value of 0.0 the same as "not
+    # set" -- confirm that 0 is never a legal boundary here.
+    if not min_q1 and min_q2:
+        val = min_q2
+    elif min_q1 and not min_q2:
+        val = min_q1
+    elif not min_q1 and not min_q2:
+        val = None
+    else:
+        val = max(min_q1, min_q2)
+    return val
+
+
+def get_max_q_boundary(max_q1, max_q2):
+    # Combine two upper q boundaries: take whichever is set, or the more
+    # restrictive (smaller) one when both are set.
+    # NOTE(review): as with get_min_q_boundary, 0.0 is treated as unset.
+    if not max_q1 and max_q2:
+        val = max_q2
+    elif max_q1 and not max_q2:
+        val = max_q1
+    elif not max_q1 and not max_q2:
+        val = None
+    else:
+        val = min(max_q1, max_q2)
+    return val
+
+
+def convert_mm_to_m(value):
+    # Millimetres (user-file convention) to metres (state convention).
+    return value/1000.
+
+
+def set_background_tof_general(builder, user_file_items):
+    # The general background settings
+    if BackId.all_monitors in user_file_items:
+        back_all_monitors = user_file_items[BackId.all_monitors]
+        # Should the user have chosen several values, then the last element is selected
+        check_if_contains_only_one_element(back_all_monitors, BackId.all_monitors)
+        back_all_monitors = back_all_monitors[-1]
+        builder.set_background_TOF_general_start(back_all_monitors.start)
+        builder.set_background_TOF_general_stop(back_all_monitors.stop)
+
+
+def set_background_tof_monitor(builder, user_file_items):
+    # The monitor off switches. Get all monitors which should not have an individual background setting
+    monitor_exclusion_list = []
+    if BackId.monitor_off in user_file_items:
+        back_monitor_off = user_file_items[BackId.monitor_off]
+        # NOTE(review): UserFileReader._add_to_output stores every tag's
+        # values as a list, and lists have no .values() -- verify that
+        # BackId.monitor_off really maps to a dict here, or that this should
+        # iterate the list directly.
+        monitor_exclusion_list = list(back_monitor_off.values())
+
+    # Get all individual monitor background settings. But ignore those settings where there was an explicit
+    # off setting. Those monitors were collected in the monitor_exclusion_list collection
+    if BackId.single_monitors in user_file_items:
+        background_tof_monitor_start = {}
+        background_tof_monitor_stop = {}
+        back_single_monitors = user_file_items[BackId.single_monitors]
+        for element in back_single_monitors:
+            monitor = element.monitor
+            if monitor not in monitor_exclusion_list:
+                # We need to set it to string since Mantid's Property manager cannot handle integers as a key.
+                background_tof_monitor_start.update({str(monitor): element.start})
+                background_tof_monitor_stop.update({str(monitor): element.stop})
+        builder.set_background_TOF_monitor_start(background_tof_monitor_start)
+        builder.set_background_TOF_monitor_stop(background_tof_monitor_stop)
+
+
+def set_wavelength_limits(builder, user_file_items):
+    # Forward the L/WAV limits (low, high, step, step type) to the builder.
+    if LimitsId.wavelength in user_file_items:
+        wavelength_limits = user_file_items[LimitsId.wavelength]
+        check_if_contains_only_one_element(wavelength_limits, LimitsId.wavelength)
+        wavelength_limits = wavelength_limits[-1]
+        builder.set_wavelength_low(wavelength_limits.start)
+        builder.set_wavelength_high(wavelength_limits.stop)
+        builder.set_wavelength_step(wavelength_limits.step)
+        builder.set_wavelength_step_type(wavelength_limits.step_type)
+
+
+def set_prompt_peak_correction(builder, user_file_items):
+    # The FIT/MONITOR time range doubles as the prompt-peak correction window.
+    if FitId.monitor_times in user_file_items:
+        fit_monitor_times = user_file_items[FitId.monitor_times]
+        # Should the user have chosen several values, then the last element is selected
+        check_if_contains_only_one_element(fit_monitor_times, FitId.monitor_times)
+        fit_monitor_times = fit_monitor_times[-1]
+        builder.set_prompt_peak_correction_min(fit_monitor_times.start)
+        builder.set_prompt_peak_correction_max(fit_monitor_times.stop)
+
+
+def set_single_entry(builder, method_name, tag, all_entries, apply_to_value=None):
+    """
+    Sets a single element on the specified builder via a specified method name.
+
+    If several entries were specified by the user, then the last entry is used
+    and a notice is logged for the discarded ones.
+    @param builder: a builder object
+    @param method_name: a method on the builder object
+    @param tag: the tag of an entry which is potentially part of all_entries
+    @param all_entries: all parsed entries
+    @param apply_to_value: a function which should be applied before setting the value. If it is None, then nothing
+                           happens
+    """
+    if tag in all_entries:
+        list_of_entries = all_entries[tag]
+        # We expect only one entry, but the user could have specified it several times.
+        # If so we want to log it.
+        check_if_contains_only_one_element(list_of_entries, tag)
+        # We select the entry which was added last.
+        entry = list_of_entries[-1]
+        if apply_to_value is not None:
+            entry = apply_to_value(entry)
+        # Set the value on the specified method
+        method = getattr(builder, method_name)
+        method(entry)
+
+
+class UserFileStateDirectorISIS(object):
+    def __init__(self, data_info):
+        """Create one builder per sub-state for the given (validated) data state.
+
+        @param data_info: the data state object; validate() is called before use.
+        """
+        super(UserFileStateDirectorISIS, self).__init__()
+        data_info.validate()
+        self._data = data_info
+
+        # Resolved path of the user file; set by set_user_file.
+        self._user_file = None
+
+        self._state_builder = get_state_builder(self._data)
+        self._mask_builder = get_mask_builder(self._data)
+        self._move_builder = get_move_builder(self._data)
+        self._reduction_builder = get_reduction_mode_builder(self._data)
+        self._slice_event_builder = get_slice_event_builder(self._data)
+        self._wavelength_builder = get_wavelength_builder(self._data)
+        self._save_builder = get_save_builder(self._data)
+        self._scale_builder = get_scale_builder(self._data)
+
+        self._adjustment_builder = get_adjustment_builder(self._data)
+        self._normalize_to_monitor_builder = get_normalize_to_monitor_builder(self._data)
+        self._calculate_transmission_builder = get_calculate_transmission_builder(self._data)
+        self._wavelength_and_pixel_adjustment_builder = get_wavelength_and_pixel_adjustment_builder(self._data)
+
+        self._convert_to_q_builder = get_convert_to_q_builder(self._data)
+
+        self._compatibility_builder = get_compatibility_builder(self._data)
+
+        # Now that we have setup all builders in the director we want to also allow for manual setting
+        # of some components. In order to get the desired results we need to perform setter forwarding, e.g
+        # self._scale_builder has the setter set_width, then the director should have a method called
+        # set_scale_width whose input is forwarded to the actual builder. We can only set this retroactively
+        # via monkey-patching.
+        self._set_up_setter_forwarding()
+
+    def _set_up_setter_forwarding(self):
+        # Monkey-patch forwarding setters for every builder attribute (see the
+        # explanation in __init__).
+        set_up_setter_forwarding_from_director_to_builder(self, "_state_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_mask_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_move_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_reduction_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_slice_event_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_wavelength_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_save_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_scale_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_adjustment_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_normalize_to_monitor_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_calculate_transmission_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_wavelength_and_pixel_adjustment_builder")
+        set_up_setter_forwarding_from_director_to_builder(self, "_convert_to_q_builder")
+
+        set_up_setter_forwarding_from_director_to_builder(self, "_compatibility_builder")
+
+    def set_user_file(self, user_file):
+        """Resolve, read and apply a user file onto the state builders.
+
+        Raises RuntimeError when the file cannot be found on the Mantid path.
+        """
+        file_path = find_full_file_path(user_file)
+        if file_path is None:
+            raise RuntimeError("UserFileStateDirector: The specified user file cannot be found. Make sure that the "
+                               "directory which contains the user file is added to the Mantid path.")
+        self._user_file = file_path
+        reader = UserFileReader(self._user_file)
+        user_file_items = reader.read_user_file()
+        self.add_state_settings(user_file_items)
+
+    def add_state_settings(self, user_file_items):
+        """
+        This allows for a usage of the UserFileStateDirector with externally provided user_file_items or internally
+        via the set_user_file method.
+
+        @param user_file_items: a list of parsed user file items.
+        """
+        # ----------------------------------------------------
+        # Populate the different sub states from the user file
+        # ----------------------------------------------------
+        # Data state
+        self._add_information_to_data_state(user_file_items)
+
+        # Mask state
+        self._set_up_mask_state(user_file_items)
+
+        # Reduction state
+        self._set_up_reduction_state(user_file_items)
+
+        # Move state
+        self._set_up_move_state(user_file_items)
+
+        # Wavelength state
+        self._set_up_wavelength_state(user_file_items)
+
+        # Slice event state
+        self._set_up_slice_event_state(user_file_items)
+        # There does not seem to be a command for this currently -- this should be added in the future
+
+        # Scale state
+        self._set_up_scale_state(user_file_items)
+
+        # Adjustment state and its substates
+        self._set_up_adjustment_state(user_file_items)
+        self._set_up_normalize_to_monitor_state(user_file_items)
+        self._set_up_calculate_transmission(user_file_items)
+        self._set_up_wavelength_and_pixel_adjustment(user_file_items)
+
+        # Convert to Q state
+        self._set_up_convert_to_q_state(user_file_items)
+
+        # Compatibility state
+        self._set_up_compatibility(user_file_items)
+
+    def construct(self):
+        # Create the different sub states and add them to the state
+        # Mask state
+        mask_state = self._mask_builder.build()
+        mask_state.validate()
+        self._state_builder.set_mask(mask_state)
+
+        # Reduction state
+        reduction_state = self._reduction_builder.build()
+        reduction_state.validate()
+        self._state_builder.set_reduction(reduction_state)
+
+        # Move state
+        move_state = self._move_builder.build()
+        move_state.validate()
+        self._state_builder.set_move(move_state)
+
+        # Slice Event state
+        slice_event_state = self._slice_event_builder.build()
+        slice_event_state.validate()
+        self._state_builder.set_slice(slice_event_state)
+
+        # Wavelength conversion state
+        wavelength_state = self._wavelength_builder.build()
+        wavelength_state.validate()
+        self._state_builder.set_wavelength(wavelength_state)
+
+        # Save state
+        save_state = self._save_builder.build()
+        save_state.validate()
+        self._state_builder.set_save(save_state)
+
+        # Scale state
+        scale_state = self._scale_builder.build()
+        scale_state.validate()
+        self._state_builder.set_scale(scale_state)
+
+        # Adjustment state with the sub states
+        normalize_to_monitor_state = self._normalize_to_monitor_builder.build()
+        self._adjustment_builder.set_normalize_to_monitor(normalize_to_monitor_state)
+
+        calculate_transmission_state = self._calculate_transmission_builder.build()
+        self._adjustment_builder.set_calculate_transmission(calculate_transmission_state)
+
+        wavelength_and_pixel_adjustment_state = self._wavelength_and_pixel_adjustment_builder.build()
+        self._adjustment_builder.set_wavelength_and_pixel_adjustment(wavelength_and_pixel_adjustment_state)
+
+        adjustment_state = self._adjustment_builder.build()
+        adjustment_state.validate()
+
+        self._state_builder.set_adjustment(adjustment_state)
+
+        # Convert to Q state
+        convert_to_q_state = self._convert_to_q_builder.build()
+        convert_to_q_state.validate()
+        self._state_builder.set_convert_to_q(convert_to_q_state)
+
+        # Compatibility state
+        compatibility_state = self._compatibility_builder.build()
+        compatibility_state.validate()
+        self._state_builder.set_compatibility(compatibility_state)
+
+        # Data state
+        self._state_builder.set_data(self._data)
+
+        return self._state_builder.build()
+
+    def _set_up_move_state(self, user_file_items):  # noqa
+        # The elements which can be set up via the user file are:
+        # 1. Correction in X, Y, Z
+        # 2. Rotation
+        # 3. Side translation
+        # 4. Xtilt and Ytilt
+        # 5. Sample offset
+        # 6. Monitor 4 offset
+        # 7. Beam centre
+
+        # ---------------------------
+        # Correction for X, Y, Z
+        # ---------------------------
+        if DetectorId.correction_x in user_file_items:
+            corrections_in_x = user_file_items[DetectorId.correction_x]
+            for correction_x in corrections_in_x:
+                if correction_x.detector_type is DetectorType.HAB:
+                    self._move_builder.set_HAB_x_translation_correction(convert_mm_to_m(correction_x.entry))
+                elif correction_x.detector_type is DetectorType.LAB:
+                    self._move_builder.set_LAB_x_translation_correction(convert_mm_to_m(correction_x.entry))
+                else:
+                    raise RuntimeError("UserFileStateDirector: An unknown detector {0} was used for the"
+                                       " x correction.".format(correction_x.detector_type))
+
+        if DetectorId.correction_y in user_file_items:
+            corrections_in_y = user_file_items[DetectorId.correction_y]
+            for correction_y in corrections_in_y:
+                if correction_y.detector_type is DetectorType.HAB:
+                    self._move_builder.set_HAB_y_translation_correction(convert_mm_to_m(correction_y.entry))
+                elif correction_y.detector_type is DetectorType.LAB:
+                    self._move_builder.set_LAB_y_translation_correction(convert_mm_to_m(correction_y.entry))
+                else:
+                    raise RuntimeError("UserFileStateDirector: An unknown detector {0} was used for the"
+                                       " y correction.".format(correction_y.detector_type))
+
+        if DetectorId.correction_z in user_file_items:
+            corrections_in_z = user_file_items[DetectorId.correction_z]
+            for correction_z in corrections_in_z:
+                if correction_z.detector_type is DetectorType.HAB:
+                    self._move_builder.set_HAB_z_translation_correction(convert_mm_to_m(correction_z.entry))
+                elif correction_z.detector_type is DetectorType.LAB:
+                    self._move_builder.set_LAB_z_translation_correction(convert_mm_to_m(correction_z.entry))
+                else:
+                    raise RuntimeError("UserFileStateDirector: An unknown detector {0} was used for the"
+                                       " z correction.".format(correction_z.detector_type))
+
+        # ---------------------------
+        # Correction for Rotation
+        # ---------------------------
+        if DetectorId.correction_rotation in user_file_items:
+            rotation_correction = user_file_items[DetectorId.correction_rotation]
+            # Should the user have chosen several values, then the last element is selected
+            check_if_contains_only_one_element(rotation_correction, DetectorId.correction_rotation)
+            rotation_correction = rotation_correction[-1]
+            if rotation_correction.detector_type is DetectorType.HAB:
+                self._move_builder.set_HAB_rotation_correction(rotation_correction.entry)
+            elif rotation_correction.detector_type is DetectorType.LAB:
+                self._move_builder.set_LAB_rotation_correction(rotation_correction.entry)
+            else:
+                raise RuntimeError("UserFileStateDirector: An unknown detector {0} was used for the"
+                                   " rotation correction.".format(rotation_correction.detector_type))
+
+        # ---------------------------
+        # Correction for Radius
+        # ---------------------------
+        if DetectorId.correction_radius in user_file_items:
+            radius_corrections = user_file_items[DetectorId.correction_radius]
+            for radius_correction in radius_corrections:
+                if radius_correction.detector_type is DetectorType.HAB:
+                    self._move_builder.set_HAB_radius_correction(convert_mm_to_m(radius_correction.entry))
+                elif radius_correction.detector_type is DetectorType.LAB:
+                    self._move_builder.set_LAB_radius_correction(convert_mm_to_m(radius_correction.entry))
+                else:
+                    raise RuntimeError("UserFileStateDirector: An unknown detector {0} was used for the"
+                                       " radius correction.".format(radius_correction.detector_type))
+
+        # ---------------------------
+        # Correction for Translation
+        # ---------------------------
+        if DetectorId.correction_translation in user_file_items:
+            side_corrections = user_file_items[DetectorId.correction_translation]
+            for side_correction in side_corrections:
+                if side_correction.detector_type is DetectorType.HAB:
+                    self._move_builder.set_HAB_side_correction(convert_mm_to_m(side_correction.entry))
+                elif side_correction.detector_type is DetectorType.LAB:
+                    self._move_builder.set_LAB_side_correction(convert_mm_to_m(side_correction.entry))
+                else:
+                    raise RuntimeError("UserFileStateDirector: An unknown detector {0} was used for the"
+                                       " side correction.".format(side_correction.detector_type))
+
+        # ---------------------------
+        # Tilt
+        # ---------------------------
+        if DetectorId.correction_x_tilt in user_file_items:
+            tilt_correction = user_file_items[DetectorId.correction_x_tilt]
+            tilt_correction = tilt_correction[-1]
+            if tilt_correction.detector_type is DetectorType.HAB:
+                self._move_builder.set_HAB_x_tilt_correction(tilt_correction.entry)
+            elif tilt_correction.detector_type is DetectorType.LAB:
+                self._move_builder.set_LAB_side_correction(tilt_correction.entry)
+            else:
+                raise RuntimeError("UserFileStateDirector: An unknown detector {0} was used for the"
+                                   " titlt correction.".format(tilt_correction.detector_type))
+
+        if DetectorId.correction_y_tilt in user_file_items:
+            tilt_correction = user_file_items[DetectorId.correction_y_tilt]
+            tilt_correction = tilt_correction[-1]
+            if tilt_correction.detector_type is DetectorType.HAB:
+                self._move_builder.set_HAB_y_tilt_correction(tilt_correction.entry)
+            elif tilt_correction.detector_type is DetectorType.LAB:
+                self._move_builder.set_LAB_side_correction(tilt_correction.entry)
+            else:
+                raise RuntimeError("UserFileStateDirector: An unknown detector {0} was used for the"
+                                   " titlt correction.".format(tilt_correction.detector_type))
+
+        # ---------------------------
+        # Sample offset
+        # ---------------------------
+        set_single_entry(self._move_builder, "set_sample_offset", SampleId.offset,
+                         user_file_items, apply_to_value=convert_mm_to_m)
+
+        # ---------------------------
+        # Monitor 4 offset; for now this is only SANS2D
+        # ---------------------------
+        if TransId.spec_shift in user_file_items:
+            monitor_4_shift = user_file_items[TransId.spec_shift]
+            # Should the user have chosen several values, then the last element is selected
+            check_if_contains_only_one_element(monitor_4_shift, TransId.spec_shift)
+            monitor_4_shift = monitor_4_shift[-1]
+            set_monitor_4_offset = getattr(self._move_builder, "set_monitor_4_offset", None)
+            if isinstance(set_monitor_4_offset, collections.Callable):
+                self._move_builder.set_monitor_4_offset(convert_mm_to_m(monitor_4_shift))
+            else:
+                log_non_existing_field("set_monitor_4_offset")
+
+        # ---------------------------
+        # Beam Centre, this can be for HAB and LAB
+        # ---------------------------
+        if SetId.centre in user_file_items:
+            beam_centres = user_file_items[SetId.centre]
+            beam_centres_for_hab = [beam_centre for beam_centre in beam_centres if beam_centre.detector_type
+                                    is DetectorType.HAB]
+            beam_centres_for_lab = [beam_centre for beam_centre in beam_centres if beam_centre.detector_type
+                                    is DetectorType.LAB]
+            for beam_centre in beam_centres_for_lab:
+                pos1 = beam_centre.pos1
+                pos2 = beam_centre.pos2
+                self._move_builder.set_LAB_sample_centre_pos1(self._move_builder.convert_pos1(pos1))
+                self._move_builder.set_LAB_sample_centre_pos2(self._move_builder.convert_pos2(pos2))
+                self._move_builder.set_HAB_sample_centre_pos1(self._move_builder.convert_pos1(pos1))
+                self._move_builder.set_HAB_sample_centre_pos2(self._move_builder.convert_pos2(pos2))
+
+            for beam_centre in beam_centres_for_hab:
+                pos1 = beam_centre.pos1
+                pos2 = beam_centre.pos2
+                self._move_builder.set_HAB_sample_centre_pos1(self._move_builder.convert_pos1(pos1))
+                self._move_builder.set_HAB_sample_centre_pos2(self._move_builder.convert_pos2(pos2))
+
+    def _set_up_reduction_state(self, user_file_items):
+        """
+        Populate the reduction builder: reduction mode, merge scale/shift and
+        the fit settings for merged reductions.
+
+        @param user_file_items: the parsed user file entries, keyed by id.
+        """
+        # There are several things that can be extracted from the user file
+        # 1. The reduction mode
+        # 2. The merge behaviour
+        # 3. The dimensionality is not set via the user file
+
+        # ------------------------
+        # Reduction mode
+        # ------------------------
+        set_single_entry(self._reduction_builder, "set_reduction_mode", DetectorId.reduction_mode, user_file_items)
+
+        # -------------------------------
+        # Shift and rescale
+        # -------------------------------
+        set_single_entry(self._reduction_builder, "set_merge_scale", DetectorId.rescale, user_file_items)
+        set_single_entry(self._reduction_builder, "set_merge_shift", DetectorId.shift, user_file_items)
+
+        # -------------------------------
+        # Fitting merged
+        # -------------------------------
+        # Collect the optional q range and the fit flag for the rescale fit.
+        q_range_min_scale = None
+        q_range_max_scale = None
+        has_rescale_fit = False
+        if DetectorId.rescale_fit in user_file_items:
+            rescale_fits = user_file_items[DetectorId.rescale_fit]
+            # Should the user have chosen several values, then the last element is selected
+            check_if_contains_only_one_element(rescale_fits, DetectorId.rescale_fit)
+            rescale_fit = rescale_fits[-1]
+            q_range_min_scale = rescale_fit.start
+            q_range_max_scale = rescale_fit.stop
+            has_rescale_fit = rescale_fit.use_fit
+
+        # Collect the optional q range and the fit flag for the shift fit.
+        q_range_min_shift = None
+        q_range_max_shift = None
+        has_shift_fit = False
+        if DetectorId.shift_fit in user_file_items:
+            shift_fits = user_file_items[DetectorId.shift_fit]
+            # Should the user have chosen several values, then the last element is selected
+            check_if_contains_only_one_element(shift_fits, DetectorId.shift_fit)
+            shift_fit = shift_fits[-1]
+            q_range_min_shift = shift_fit.start
+            q_range_max_shift = shift_fit.stop
+            has_shift_fit = shift_fit.use_fit
+
+        # Select the merge fit mode from the combination of the two fit flags.
+        # When both fits are requested, the q boundaries of the scale and the
+        # shift fit are combined via get_min_q_boundary/get_max_q_boundary.
+        # NOTE(review): the guards below use truthiness, so a boundary of
+        # exactly 0.0 is treated like "not set" and skipped -- confirm this
+        # is intended.
+        if has_rescale_fit and has_shift_fit:
+            self._reduction_builder.set_merge_fit_mode(FitModeForMerge.Both)
+            min_q = get_min_q_boundary(q_range_min_scale, q_range_min_shift)
+            max_q = get_max_q_boundary(q_range_max_scale, q_range_max_shift)
+            if min_q:
+                self._reduction_builder.set_merge_range_min(min_q)
+            if max_q:
+                self._reduction_builder.set_merge_range_max(max_q)
+        elif has_rescale_fit and not has_shift_fit:
+            self._reduction_builder.set_merge_fit_mode(FitModeForMerge.ScaleOnly)
+            if q_range_min_scale:
+                self._reduction_builder.set_merge_range_min(q_range_min_scale)
+            if q_range_max_scale:
+                self._reduction_builder.set_merge_range_max(q_range_max_scale)
+        elif not has_rescale_fit and has_shift_fit:
+            self._reduction_builder.set_merge_fit_mode(FitModeForMerge.ShiftOnly)
+            if q_range_min_shift:
+                self._reduction_builder.set_merge_range_min(q_range_min_shift)
+            if q_range_max_shift:
+                self._reduction_builder.set_merge_range_max(q_range_max_shift)
+        else:
+            self._reduction_builder.set_merge_fit_mode(FitModeForMerge.NoFit)
+
+        # ------------------------
+        # Reduction Dimensionality
+        # ------------------------
+        set_single_entry(self._reduction_builder, "set_reduction_dimensionality", OtherId.reduction_dimensionality,
+                         user_file_items)
+
+    def _set_up_mask_state(self, user_file_items):  # noqa
+        # Check for the various possible masks that can be present in the user file
+        # This can be:
+        # 1. A line mask
+        # 2. A time mask
+        # 3. A detector-bound time mask
+        # 4. A clean command
+        # 5. A time clear command
+        # 6. A single spectrum mask
+        # 7. A spectrum range mask
+        # 8. A vertical single strip mask
+        # 9. A vertical range strip mask
+        # 10. A horizontal single strip mask
+        # 11. A horizontal range strip mask
+        # 12. A block mask
+        # 13. A cross-type block mask
+        # 14. Angle masking
+        # 15. Mask files
+
+        # ---------------------------------
+        # 1. Line Mask
+        # ---------------------------------
+        if MaskId.line in user_file_items:
+            mask_lines = user_file_items[MaskId.line]
+            # If there were several arms specified then we take only the last
+            check_if_contains_only_one_element(mask_lines, MaskId.line)
+            mask_line = mask_lines[-1]
+            # We need the width and the angle
+            angle = mask_line.angle
+            width = convert_mm_to_m(mask_line.width)
+            # The position is already specified in meters in the user file
+            pos1 = mask_line.x
+            pos2 = mask_line.y
+            if angle is None or width is None:
+                raise RuntimeError("UserFileStateDirector: You specified a line mask without an angle or a width."
+                                   "The parameters were: width {0}; angle {1}; x {2}; y {3}".format(width, angle,
+                                                                                                    pos1, pos2))
+            pos1 = 0.0 if pos1 is None else pos1
+            pos2 = 0.0 if pos2 is None else pos2
+
+            self._mask_builder.set_beam_stop_arm_width(width)
+            self._mask_builder.set_beam_stop_arm_angle(angle)
+            self._mask_builder.set_beam_stop_arm_pos1(pos1)
+            self._mask_builder.set_beam_stop_arm_pos2(pos2)
+
+        # ---------------------------------
+        # 2. General time mask
+        # ---------------------------------
+        if MaskId.time in user_file_items:
+            mask_time_general = user_file_items[MaskId.time]
+            start_time = []
+            stop_time = []
+            for times in mask_time_general:
+                if times.start > times.start:
+                    raise RuntimeError("UserFileStateDirector: You specified a general time mask with a start time {0}"
+                                       " which is larger than the stop time {1} of the mask. This is not"
+                                       " valid.".format(times.start, times.stop))
+                start_time.append(times.start)
+                stop_time.append(times.stop)
+            self._mask_builder.set_bin_mask_general_start(start_time)
+            self._mask_builder.set_bin_mask_general_stop(stop_time)
+
+        # ---------------------------------
+        # 3. Detector-bound time mask
+        # ---------------------------------
+        if MaskId.time_detector in user_file_items:
+            mask_times = user_file_items[MaskId.time_detector]
+            start_times_hab = []
+            stop_times_hab = []
+            start_times_lab = []
+            stop_times_lab = []
+            for times in mask_times:
+                if times.start > times.start:
+                    raise RuntimeError("UserFileStateDirector: You specified a general time mask with a start time {0}"
+                                       " which is larger than the stop time {1} of the mask. This is not"
+                                       " valid.".format(times.start, times.stop))
+                if times.detector_type is DetectorType.HAB:
+                    start_times_hab.append(times.start)
+                    stop_times_hab.append(times.stop)
+                elif times.detector_type is DetectorType.LAB:
+                    start_times_hab.append(times.start)
+                    stop_times_hab.append(times.stop)
+                else:
+                    RuntimeError("UserFileStateDirector: The specified detector {0} is not "
+                                 "known".format(times.detector_type))
+            if start_times_hab:
+                self._mask_builder.set_HAB_bin_mask_start(start_times_hab)
+            if stop_times_hab:
+                self._mask_builder.set_HAB_bin_mask_stop(stop_times_hab)
+            if start_times_lab:
+                self._mask_builder.set_LAB_bin_mask_start(start_times_lab)
+            if stop_times_lab:
+                self._mask_builder.set_LAB_bin_mask_stop(stop_times_lab)
+
+        # ---------------------------------
+        # 4. Clear detector
+        # ---------------------------------
+        if MaskId.clear_detector_mask in user_file_items:
+            clear_detector_mask = user_file_items[MaskId.clear_detector_mask]
+            check_if_contains_only_one_element(clear_detector_mask, MaskId.clear_detector_mask)
+            # We select the entry which was added last.
+            clear_detector_mask = clear_detector_mask[-1]
+            self._mask_builder.set_clear(clear_detector_mask)
+
+        # ---------------------------------
+        # 5. Clear time
+        # ---------------------------------
+        if MaskId.clear_time_mask in user_file_items:
+            clear_time_mask = user_file_items[MaskId.clear_time_mask]
+            check_if_contains_only_one_element(clear_time_mask, MaskId.clear_time_mask)
+            # We select the entry which was added last.
+            clear_time_mask = clear_time_mask[-1]
+            self._mask_builder.set_clear_time(clear_time_mask)
+
+        # ---------------------------------
+        # 6. Single Spectrum
+        # ---------------------------------
+        if MaskId.single_spectrum_mask in user_file_items:
+            single_spectra = user_file_items[MaskId.single_spectrum_mask]
+            self._mask_builder.set_single_spectra(single_spectra)
+
+        # ---------------------------------
+        # 7. Spectrum Range
+        # ---------------------------------
+        if MaskId.spectrum_range_mask in user_file_items:
+            spectrum_ranges = user_file_items[MaskId.spectrum_range_mask]
+            start_range = []
+            stop_range = []
+            for spectrum_range in spectrum_ranges:
+                if spectrum_range.start > spectrum_range.start:
+                    raise RuntimeError("UserFileStateDirector: You specified a spectrum range with a start value {0}"
+                                       " which is larger than the stop value {1}. This is not"
+                                       " valid.".format(spectrum_range.start, spectrum_range.stop))
+                start_range.append(spectrum_range.start)
+                stop_range.append(spectrum_range.stop)
+            self._mask_builder.set_spectrum_range_start(start_range)
+            self._mask_builder.set_spectrum_range_stop(stop_range)
+
+        # ---------------------------------
+        # 8. Vertical single strip
+        # ---------------------------------
+        if MaskId.vertical_single_strip_mask in user_file_items:
+            single_vertical_strip_masks = user_file_items[MaskId.vertical_single_strip_mask]
+            entry_hab = []
+            entry_lab = []
+            for single_vertical_strip_mask in single_vertical_strip_masks:
+                if single_vertical_strip_mask.detector_type is DetectorType.HAB:
+                    entry_hab.append(single_vertical_strip_mask.entry)
+                elif single_vertical_strip_mask.detector_type is DetectorType.LAB:
+                    entry_lab.append(single_vertical_strip_mask.entry)
+                else:
+                    raise RuntimeError("UserFileStateDirector: The vertical single strip mask {0} has an unknown "
+                                       "detector {1} associated"
+                                       " with it.".format(single_vertical_strip_mask.entry,
+                                                          single_vertical_strip_mask.detector_type))
+            if entry_hab:
+                self._mask_builder.set_HAB_single_vertical_strip_mask(entry_hab)
+            if entry_lab:
+                self._mask_builder.set_LAB_single_vertical_strip_mask(entry_lab)
+
+        # ---------------------------------
+        # 9. Vertical range strip
+        # ---------------------------------
+        if MaskId.vertical_range_strip_mask in user_file_items:
+            range_vertical_strip_masks = user_file_items[MaskId.vertical_range_strip_mask]
+            start_hab = []
+            stop_hab = []
+            start_lab = []
+            stop_lab = []
+            for range_vertical_strip_mask in range_vertical_strip_masks:
+                if range_vertical_strip_mask.detector_type is DetectorType.HAB:
+                    start_hab.append(range_vertical_strip_mask.start)
+                    stop_hab.append(range_vertical_strip_mask.stop)
+                elif range_vertical_strip_mask.detector_type is DetectorType.LAB:
+                    start_lab.append(range_vertical_strip_mask.start)
+                    stop_lab.append(range_vertical_strip_mask.stop)
+                else:
+                    raise RuntimeError("UserFileStateDirector: The vertical range strip mask {0} has an unknown "
+                                       "detector {1} associated "
+                                       "with it.".format(range_vertical_strip_mask.entry,
+                                                         range_vertical_strip_mask.detector_type))
+            if start_hab:
+                self._mask_builder.set_HAB_range_vertical_strip_start(start_hab)
+            if stop_hab:
+                self._mask_builder.set_HAB_range_vertical_strip_stop(stop_hab)
+            if start_lab:
+                self._mask_builder.set_LAB_range_vertical_strip_start(start_lab)
+            if stop_lab:
+                self._mask_builder.set_LAB_range_vertical_strip_stop(stop_lab)
+
+        # ---------------------------------
+        # 10. Horizontal single strip
+        # ---------------------------------
+        if MaskId.horizontal_single_strip_mask in user_file_items:
+            single_horizontal_strip_masks = user_file_items[MaskId.horizontal_single_strip_mask]
+            entry_hab = []
+            entry_lab = []
+            for single_horizontal_strip_mask in single_horizontal_strip_masks:
+                if single_horizontal_strip_mask.detector_type is DetectorType.HAB:
+                    entry_hab.append(single_horizontal_strip_mask.entry)
+                elif single_horizontal_strip_mask.detector_type is DetectorType.LAB:
+                    entry_lab.append(single_horizontal_strip_mask.entry)
+                else:
+                    raise RuntimeError("UserFileStateDirector: The horizontal single strip mask {0} has an unknown "
+                                       "detector {1} associated"
+                                       " with it.".format(single_horizontal_strip_mask.entry,
+                                                          single_horizontal_strip_mask.detector_type))
+            if entry_hab:
+                self._mask_builder.set_HAB_single_horizontal_strip_mask(entry_hab)
+            if entry_lab:
+                self._mask_builder.set_LAB_single_horizontal_strip_mask(entry_lab)
+
+        # ---------------------------------
+        # 11. Horizontal range strip
+        # ---------------------------------
+        if MaskId.horizontal_range_strip_mask in user_file_items:
+            range_horizontal_strip_masks = user_file_items[MaskId.horizontal_range_strip_mask]
+            start_hab = []
+            stop_hab = []
+            start_lab = []
+            stop_lab = []
+            for range_horizontal_strip_mask in range_horizontal_strip_masks:
+                if range_horizontal_strip_mask.detector_type is DetectorType.HAB:
+                    start_hab.append(range_horizontal_strip_mask.start)
+                    stop_hab.append(range_horizontal_strip_mask.stop)
+                elif range_horizontal_strip_mask.detector_type is DetectorType.LAB:
+                    start_lab.append(range_horizontal_strip_mask.start)
+                    stop_lab.append(range_horizontal_strip_mask.stop)
+                else:
+                    raise RuntimeError("UserFileStateDirector: The vertical range strip mask {0} has an unknown "
+                                       "detector {1} associated "
+                                       "with it.".format(range_horizontal_strip_mask.entry,
+                                                         range_horizontal_strip_mask.detector_type))
+            if start_hab:
+                self._mask_builder.set_HAB_range_horizontal_strip_start(start_hab)
+            if stop_hab:
+                self._mask_builder.set_HAB_range_horizontal_strip_stop(stop_hab)
+            if start_lab:
+                self._mask_builder.set_LAB_range_horizontal_strip_start(start_lab)
+            if stop_lab:
+                self._mask_builder.set_LAB_range_horizontal_strip_stop(stop_lab)
+
+        # ---------------------------------
+        # 12. Block
+        # ---------------------------------
+        if MaskId.block in user_file_items:
+            blocks = user_file_items[MaskId.block]
+            horizontal_start_hab = []
+            horizontal_stop_hab = []
+            vertical_start_hab = []
+            vertical_stop_hab = []
+            horizontal_start_lab = []
+            horizontal_stop_lab = []
+            vertical_start_lab = []
+            vertical_stop_lab = []
+
+            for block in blocks:
+                if block.horizontal1 > block.horizontal2 or block.vertical1 > block.vertical2:
+                    raise RuntimeError("UserFileStateDirector: The block mask seems to have inconsistent entries. "
+                                       "The values are horizontal_start {0}; horizontal_stop {1}; vertical_start {2};"
+                                       " vertical_stop {3}".format(block.horizontal1, block.horizontal2,
+                                                                   block.vertical1, block.vertical2))
+                if block.detector_type is DetectorType.HAB:
+                    horizontal_start_hab.append(block.horizontal1)
+                    horizontal_stop_hab.append(block.horizontal2)
+                    vertical_start_hab.append(block.vertical1)
+                    vertical_stop_hab.append(block.vertical2)
+                elif block.detector_type is DetectorType.LAB:
+                    horizontal_start_lab.append(block.horizontal1)
+                    horizontal_stop_lab.append(block.horizontal2)
+                    vertical_start_lab.append(block.vertical1)
+                    vertical_stop_lab.append(block.vertical2)
+                else:
+                    raise RuntimeError("UserFileStateDirector: The block mask has an unknown "
+                                       "detector {0} associated "
+                                       "with it.".format(block.detector_type))
+            if horizontal_start_hab:
+                self._mask_builder.set_HAB_block_horizontal_start(horizontal_start_hab)
+            if horizontal_stop_hab:
+                self._mask_builder.set_HAB_block_horizontal_stop(horizontal_stop_hab)
+            if vertical_start_hab:
+                self._mask_builder.set_HAB_block_vertical_start(vertical_start_hab)
+            if vertical_stop_hab:
+                self._mask_builder.set_HAB_block_vertical_stop(vertical_stop_hab)
+            if horizontal_start_lab:
+                self._mask_builder.set_LAB_block_horizontal_start(horizontal_start_lab)
+            if horizontal_stop_lab:
+                self._mask_builder.set_LAB_block_horizontal_stop(horizontal_stop_lab)
+            if vertical_start_lab:
+                self._mask_builder.set_LAB_block_vertical_start(vertical_start_lab)
+            if vertical_stop_lab:
+                self._mask_builder.set_LAB_block_vertical_stop(vertical_stop_lab)
+
+        # ---------------------------------
+        # 13. Block cross
+        # ---------------------------------
+        if MaskId.block_cross in user_file_items:
+            block_crosses = user_file_items[MaskId.block_cross]
+            horizontal_hab = []
+            vertical_hab = []
+            horizontal_lab = []
+            vertical_lab = []
+            for block_cross in block_crosses:
+                if block_cross.detector_type is DetectorType.HAB:
+                    horizontal_hab.append(block_cross.horizontal)
+                    vertical_hab.append(block_cross.vertical)
+                elif block_cross.detector_type is DetectorType.LAB:
+                    horizontal_lab.append(block_cross.horizontal)
+                    vertical_lab.append(block_cross.vertical)
+                else:
+                    raise RuntimeError("UserFileStateDirector: The block cross mask has an unknown "
+                                       "detector {0} associated "
+                                       "with it.".format(block_cross.detector_type))
+            if horizontal_hab:
+                self._mask_builder.set_HAB_block_cross_horizontal(horizontal_hab)
+            if vertical_hab:
+                self._mask_builder.set_HAB_block_cross_vertical(vertical_hab)
+            if horizontal_lab:
+                self._mask_builder.set_LAB_block_cross_horizontal(horizontal_lab)
+            if vertical_lab:
+                self._mask_builder.set_LAB_block_cross_vertical(vertical_lab)
+
+        # ------------------------------------------------------------
+        # 14. Angles --> they are specified in L/Phi
+        # -----------------------------------------------------------
+        if LimitsId.angle in user_file_items:
+            angles = user_file_items[LimitsId.angle]
+            # Should the user have chosen several values, then the last element is selected
+            check_if_contains_only_one_element(angles, LimitsId.angle)
+            angle = angles[-1]
+            self._mask_builder.set_phi_min(angle.min)
+            self._mask_builder.set_phi_max(angle.max)
+            self._mask_builder.set_use_mask_phi_mirror(angle.use_mirror)
+
+        # ------------------------------------------------------------
+        # 15. Maskfiles
+        # -----------------------------------------------------------
+        if MaskId.file in user_file_items:
+            mask_files = user_file_items[MaskId.file]
+            self._mask_builder.set_mask_files(mask_files)
+
+        # ------------------------------------------------------------
+        # 16. Radius masks
+        # -----------------------------------------------------------
+        if LimitsId.radius in user_file_items:
+            radii = user_file_items[LimitsId.radius]
+            # Should the user have chosen several values, then the last element is selected
+            check_if_contains_only_one_element(radii, LimitsId.radius)
+            radius = radii[-1]
+            if radius.start > radius.stop > 0:
+                raise RuntimeError("UserFileStateDirector: The inner radius {0} appears to be larger that the outer"
+                                   " radius {1} of the mask.".format(radius.start, radius.stop))
+            min_value = None if radius.start is None else convert_mm_to_m(radius.start)
+            max_value = None if radius.stop is None else convert_mm_to_m(radius.stop)
+            self._mask_builder.set_radius_min(min_value)
+            self._mask_builder.set_radius_max(max_value)
+
+    def _set_up_wavelength_state(self, user_file_items):
+        if LimitsId.wavelength in user_file_items:
+            wavelength_limits = user_file_items[LimitsId.wavelength]
+            check_if_contains_only_one_element(wavelength_limits, LimitsId.wavelength)
+            wavelength_limits = wavelength_limits[-1]
+            self._wavelength_builder.set_wavelength_low(wavelength_limits.start)
+            self._wavelength_builder.set_wavelength_high(wavelength_limits.stop)
+            self._wavelength_builder.set_wavelength_step(wavelength_limits.step)
+            self._wavelength_builder.set_wavelength_step_type(wavelength_limits.step_type)
+
+    def _set_up_slice_event_state(self, user_file_items):
+        # Setting up the slice limits is current
+        if OtherId.event_slices in user_file_items:
+            event_slices = user_file_items[OtherId.event_slices]
+            check_if_contains_only_one_element(event_slices, OtherId.event_slices)
+            event_slices = event_slices[-1]
+            # The events binning can come in three forms.
+            # 1. As a simple range object
+            # 2. As an already parsed rebin array, ie min, step, max
+            # 3. As a string. Note that this includes custom commands.
+            if isinstance(event_slices, simple_range):
+                start, stop = get_ranges_for_rebin_setting(event_slices.start, event_slices.stop,
+                                                           event_slices.step, event_slices.step_type)
+            elif isinstance(event_slices, rebin_string_values):
+                start, stop = get_ranges_for_rebin_array(event_slices.value)
+            else:
+                start, stop = get_ranges_from_event_slice_setting(event_slices.value)
+            self._slice_event_builder.set_start_time(start)
+            self._slice_event_builder.set_end_time(stop)
+
+    def _set_up_scale_state(self, user_file_items):
+        # We only extract the first entry here, ie the s entry. Although there are other entries which a user can
+        # specify, such as a, b, c, d, they do not appear to be used by the reduction.
+        if SetId.scales in user_file_items:
+            scales = user_file_items[SetId.scales]
+            check_if_contains_only_one_element(scales, SetId.scales)
+            scales = scales[-1]
+            self._scale_builder.set_scale(scales.s)
+
+    def _set_up_convert_to_q_state(self, user_file_items):
+        # Get the radius cut off if any is present
+        set_single_entry(self._convert_to_q_builder, "set_radius_cutoff", LimitsId.radius_cut, user_file_items,
+                         apply_to_value=convert_mm_to_m)
+
+        # Get the wavelength cut off if any is present
+        set_single_entry(self._convert_to_q_builder, "set_wavelength_cutoff", LimitsId.wavelength_cut,
+                         user_file_items)
+
+        # Get the 1D q values
+        if LimitsId.q in user_file_items:
+            limits_q = user_file_items[LimitsId.q]
+            check_if_contains_only_one_element(limits_q, LimitsId.q)
+            limits_q = limits_q[-1]
+            # Now we have to check if we have a simple pattern or a more complex pattern at hand
+            is_complex = isinstance(limits_q, complex_range)
+            self._convert_to_q_builder.set_q_min(limits_q.start)
+            self._convert_to_q_builder.set_q_max(limits_q.stop)
+            if is_complex:
+                self._convert_to_q_builder.set_q_step(limits_q.step1)
+                self._convert_to_q_builder.set_q_step_type(limits_q.step_type1)
+                self._convert_to_q_builder.set_q_mid(limits_q.mid)
+                self._convert_to_q_builder.set_q_step2(limits_q.step2)
+                self._convert_to_q_builder.set_q_step_type2(limits_q.step_type2)
+            else:
+                self._convert_to_q_builder.set_q_step(limits_q.step)
+                self._convert_to_q_builder.set_q_step_type(limits_q.step_type)
+
+        # Get the 2D q values
+        if LimitsId.qxy in user_file_items:
+            limits_qxy = user_file_items[LimitsId.qxy]
+            check_if_contains_only_one_element(limits_qxy, LimitsId.qxy)
+            limits_qxy = limits_qxy[-1]
+            # Now we have to check if we have a simple pattern or a more complex pattern at hand
+            is_complex = isinstance(limits_qxy, complex_range)
+            self._convert_to_q_builder.set_q_xy_max(limits_qxy.stop)
+            if is_complex:
+                # Note that it has not been implemented in the old reducer, but the documentation is
+                #  suggesting that it is available. Hence we throw here.
+                raise RuntimeError("Qxy cannot handle settings of type: L/Q l1,dl1,l3,dl2,l2 [/LIN|/LOG] ")
+            else:
+                self._convert_to_q_builder.set_q_xy_step(limits_qxy.step)
+                self._convert_to_q_builder.set_q_xy_step_type(limits_qxy.step_type)
+
+        # Get the Gravity settings
+        set_single_entry(self._convert_to_q_builder, "set_use_gravity", GravityId.on_off, user_file_items)
+        set_single_entry(self._convert_to_q_builder, "set_gravity_extra_length", GravityId.extra_length,
+                         user_file_items)
+
+        # Get the QResolution settings set_q_resolution_delta_r
+        set_single_entry(self._convert_to_q_builder, "set_use_q_resolution", QResolutionId.on, user_file_items)
+        set_single_entry(self._convert_to_q_builder, "set_q_resolution_delta_r", QResolutionId.delta_r,
+                         user_file_items, apply_to_value=convert_mm_to_m)
+        set_single_entry(self._convert_to_q_builder, "set_q_resolution_collimation_length",
+                         QResolutionId.collimation_length, user_file_items)
+        set_single_entry(self._convert_to_q_builder, "set_q_resolution_a1", QResolutionId.a1, user_file_items,
+                         apply_to_value=convert_mm_to_m)
+        set_single_entry(self._convert_to_q_builder, "set_q_resolution_a2", QResolutionId.a2, user_file_items,
+                         apply_to_value=convert_mm_to_m)
+        set_single_entry(self._convert_to_q_builder, "set_moderator_file", QResolutionId.moderator,
+                         user_file_items)
+        set_single_entry(self._convert_to_q_builder, "set_q_resolution_h1", QResolutionId.h1, user_file_items,
+                         apply_to_value=convert_mm_to_m)
+        set_single_entry(self._convert_to_q_builder, "set_q_resolution_h2", QResolutionId.h2, user_file_items,
+                         apply_to_value=convert_mm_to_m)
+        set_single_entry(self._convert_to_q_builder, "set_q_resolution_w1", QResolutionId.w1, user_file_items,
+                         apply_to_value=convert_mm_to_m)
+        set_single_entry(self._convert_to_q_builder, "set_q_resolution_w2", QResolutionId.w2, user_file_items,
+                         apply_to_value=convert_mm_to_m)
+
+        # ------------------------
+        # Reduction Dimensionality
+        # ------------------------
+        set_single_entry(self._convert_to_q_builder, "set_reduction_dimensionality", OtherId.reduction_dimensionality,
+                         user_file_items)
+
+    def _set_up_adjustment_state(self, user_file_items):
+        # Get the wide angle correction setting
+        set_single_entry(self._adjustment_builder, "set_wide_angle_correction", SampleId.path, user_file_items)
+
+    def _set_up_normalize_to_monitor_state(self, user_file_items):
+        # Extract the incident monitor and which type of rebinning to use (interpolating or normal)
+        if MonId.spectrum in user_file_items:
+            mon_spectrum = user_file_items[MonId.spectrum]
+            mon_spec = [spec for spec in mon_spectrum if not spec.is_trans]
+            mon_spec = mon_spec[-1]
+            rebin_type = RebinType.InterpolatingRebin if mon_spec.interpolate else RebinType.Rebin
+            self._normalize_to_monitor_builder.set_rebin_type(rebin_type)
+            self._normalize_to_monitor_builder.set_incident_monitor(mon_spec.spectrum)
+
+        # The prompt peak correction values
+        set_prompt_peak_correction(self._normalize_to_monitor_builder, user_file_items)
+
+        # The general background settings
+        set_background_tof_general(self._normalize_to_monitor_builder, user_file_items)
+
+        # The monitor-specific background settings
+        set_background_tof_monitor(self._normalize_to_monitor_builder, user_file_items)
+
+        # Get the wavelength rebin settings
+        set_wavelength_limits(self._normalize_to_monitor_builder, user_file_items)
+
+    def _set_up_calculate_transmission(self, user_file_items):
+        # Transmission radius
+        set_single_entry(self._calculate_transmission_builder, "set_transmission_radius_on_detector", TransId.radius,
+                         user_file_items, apply_to_value=convert_mm_to_m)
+
+        # List of transmission roi files
+        if TransId.roi in user_file_items:
+            trans_roi = user_file_items[TransId.roi]
+            self._calculate_transmission_builder.set_transmission_roi_files(trans_roi)
+
+        # List of transmission mask files
+        if TransId.mask in user_file_items:
+            trans_mask = user_file_items[TransId.mask]
+            self._calculate_transmission_builder.set_transmission_mask_files(trans_mask)
+
+        # The prompt peak correction values
+        set_prompt_peak_correction(self._calculate_transmission_builder, user_file_items)
+
+        # The transmission spectrum
+        if TransId.spec in user_file_items:
+            trans_spec = user_file_items[TransId.spec]
+            # Should the user have chosen several values, then the last element is selected
+            check_if_contains_only_one_element(trans_spec, TransId.spec)
+            trans_spec = trans_spec[-1]
+            self._calculate_transmission_builder.set_transmission_monitor(trans_spec)
+
+        # The incident monitor spectrum for transmission calculation
+        if MonId.spectrum in user_file_items:
+            mon_spectrum = user_file_items[MonId.spectrum]
+            mon_spec = [spec for spec in mon_spectrum if spec.is_trans]
+            mon_spec = mon_spec[-1]
+            rebin_type = RebinType.InterpolatingRebin if mon_spec.interpolate else RebinType.Rebin
+            self._calculate_transmission_builder.set_rebin_type(rebin_type)
+            self._calculate_transmission_builder.set_incident_monitor(mon_spec.spectrum)
+
+        # The general background settings
+        set_background_tof_general(self._calculate_transmission_builder, user_file_items)
+
+        # The monitor-specific background settings
+        set_background_tof_monitor(self._calculate_transmission_builder, user_file_items)
+
+        # The roi-specific background settings
+        if BackId.trans in user_file_items:
+            back_trans = user_file_items[BackId.trans]
+            # Should the user have chosen several values, then the last element is selected
+            check_if_contains_only_one_element(back_trans, BackId.trans)
+            back_trans = back_trans[-1]
+            self._calculate_transmission_builder.set_background_TOF_roi_start(back_trans.start)
+            self._calculate_transmission_builder.set_background_TOF_roi_stop(back_trans.stop)
+
+        # Set the fit settings
+        if FitId.general in user_file_items:
+            fit_general = user_file_items[FitId.general]
+            # We can have settings for both the sample or the can or individually
+            # There can be three types of settings:
+            # 1. General settings where the entry data_type is not specified. Settings apply to both sample and can
+            # 2. Sample settings
+            # 3. Can settings
+            # We first apply the general settings. Specialized settings for can or sample override the general settings
+            # As usual if there are multiple settings for a specific case, then the last in the list is used.
+
+            # 1. General settings
+            general_settings = [item for item in fit_general if item.data_type is None]
+            if general_settings:
+                check_if_contains_only_one_element(general_settings, FitId.general)
+                general_settings = general_settings[-1]
+                self._calculate_transmission_builder.set_Sample_fit_type(general_settings.fit_type)
+                self._calculate_transmission_builder.set_Sample_polynomial_order(general_settings.polynomial_order)
+                self._calculate_transmission_builder.set_Sample_wavelength_low(general_settings.start)
+                self._calculate_transmission_builder.set_Sample_wavelength_high(general_settings.stop)
+                self._calculate_transmission_builder.set_Can_fit_type(general_settings.fit_type)
+                self._calculate_transmission_builder.set_Can_polynomial_order(general_settings.polynomial_order)
+                self._calculate_transmission_builder.set_Can_wavelength_low(general_settings.start)
+                self._calculate_transmission_builder.set_Can_wavelength_high(general_settings.stop)
+
+            # 2. Sample settings
+            sample_settings = [item for item in fit_general if item.data_type is DataType.Sample]
+            if sample_settings:
+                check_if_contains_only_one_element(sample_settings, FitId.general)
+                sample_settings = sample_settings[-1]
+                self._calculate_transmission_builder.set_Sample_fit_type(sample_settings.fit_type)
+                self._calculate_transmission_builder.set_Sample_polynomial_order(sample_settings.polynomial_order)
+                self._calculate_transmission_builder.set_Sample_wavelength_low(sample_settings.start)
+                self._calculate_transmission_builder.set_Sample_wavelength_high(sample_settings.stop)
+
+            # 3. Can settings
+            can_settings = [item for item in fit_general if item.data_type is DataType.Can]
+            if can_settings:
+                check_if_contains_only_one_element(can_settings, FitId.general)
+                can_settings = can_settings[-1]
+                self._calculate_transmission_builder.set_Can_fit_type(can_settings.fit_type)
+                self._calculate_transmission_builder.set_Can_polynomial_order(can_settings.polynomial_order)
+                self._calculate_transmission_builder.set_Can_wavelength_low(can_settings.start)
+                self._calculate_transmission_builder.set_Can_wavelength_high(can_settings.stop)
+
+        # Set the wavelength default configuration
+        if LimitsId.wavelength in user_file_items:
+            wavelength_limits = user_file_items[LimitsId.wavelength]
+            check_if_contains_only_one_element(wavelength_limits, LimitsId.wavelength)
+            wavelength_limits = wavelength_limits[-1]
+            self._calculate_transmission_builder.set_wavelength_low(wavelength_limits.start)
+            self._calculate_transmission_builder.set_wavelength_high(wavelength_limits.stop)
+            self._calculate_transmission_builder.set_wavelength_step(wavelength_limits.step)
+            self._calculate_transmission_builder.set_wavelength_step_type(wavelength_limits.step_type)
+
+        # Set the full wavelength range. Note that this can currently only be set from the ISISCommandInterface
+        if OtherId.use_full_wavelength_range in user_file_items:
+            use_full_wavelength_range = user_file_items[OtherId.use_full_wavelength_range]
+            check_if_contains_only_one_element(use_full_wavelength_range, OtherId.use_full_wavelength_range)
+            use_full_wavelength_range = use_full_wavelength_range[-1]
+            self._calculate_transmission_builder.set_use_full_wavelength_range(use_full_wavelength_range)
+
+    def _set_up_wavelength_and_pixel_adjustment(self, user_file_items):
+        # Get the flat/flood files. There can be entries for LAB and HAB.
+        if MonId.flat in user_file_items:
+            mon_flat = user_file_items[MonId.flat]
+            hab_flat_entries = [item for item in mon_flat if item.detector_type is DetectorType.HAB]
+            lab_flat_entries = [item for item in mon_flat if item.detector_type is DetectorType.LAB]
+            if hab_flat_entries:
+                hab_flat_entry = hab_flat_entries[-1]
+                self._wavelength_and_pixel_adjustment_builder.set_HAB_pixel_adjustment_file(hab_flat_entry.file_path)
+
+            if lab_flat_entries:
+                lab_flat_entry = lab_flat_entries[-1]
+                self._wavelength_and_pixel_adjustment_builder.set_LAB_pixel_adjustment_file(lab_flat_entry.file_path)
+
+        # Get the direct files. There can be entries for LAB and HAB.
+        if MonId.direct in user_file_items:
+            mon_direct = user_file_items[MonId.direct]
+            hab_direct_entries = [item for item in mon_direct if item.detector_type is DetectorType.HAB]
+            lab_direct_entries = [item for item in mon_direct if item.detector_type is DetectorType.LAB]
+            if hab_direct_entries:
+                hab_direct_entry = hab_direct_entries[-1]
+                self._wavelength_and_pixel_adjustment_builder.set_HAB_wavelength_adjustment_file(
+                    hab_direct_entry.file_path)
+
+            if lab_direct_entries:
+                lab_direct_entry = lab_direct_entries[-1]
+                self._wavelength_and_pixel_adjustment_builder.set_LAB_wavelength_adjustment_file(
+                    lab_direct_entry.file_path)
+
+        # Set up the wavelength
+        if LimitsId.wavelength in user_file_items:
+            wavelength_limits = user_file_items[LimitsId.wavelength]
+            check_if_contains_only_one_element(wavelength_limits, LimitsId.wavelength)
+            wavelength_limits = wavelength_limits[-1]
+            self._wavelength_and_pixel_adjustment_builder.set_wavelength_low(wavelength_limits.start)
+            self._wavelength_and_pixel_adjustment_builder.set_wavelength_high(wavelength_limits.stop)
+            self._wavelength_and_pixel_adjustment_builder.set_wavelength_step(wavelength_limits.step)
+            self._wavelength_and_pixel_adjustment_builder.set_wavelength_step_type(wavelength_limits.step_type)
+
+    def _set_up_compatibility(self, user_file_items):
+        if LimitsId.events_binning in user_file_items:
+            events_binning = user_file_items[LimitsId.events_binning]
+            check_if_contains_only_one_element(events_binning, LimitsId.events_binning)
+            events_binning = events_binning[-1]
+            self._compatibility_builder.set_time_rebin_string(events_binning)
+
+        if OtherId.use_compatibility_mode in user_file_items:
+            use_compatibility_mode = user_file_items[OtherId.use_compatibility_mode]
+            check_if_contains_only_one_element(use_compatibility_mode, OtherId.use_compatibility_mode)
+            use_compatibility_mode = use_compatibility_mode[-1]
+            self._compatibility_builder.set_use_compatibility_mode(use_compatibility_mode)
+
+    def _add_information_to_data_state(self, user_file_items):
+        # The only thing that should be set on the data is the tube calibration file which is specified in
+        # the user file.
+        if TubeCalibrationFileId.file in user_file_items:
+            tube_calibration = user_file_items[TubeCalibrationFileId.file]
+            check_if_contains_only_one_element(tube_calibration, TubeCalibrationFileId.file)
+            tube_calibration = tube_calibration[-1]
+            self._data.calibration = tube_calibration
+
+    def convert_pos1(self, pos1):
+        """
+        Performs a conversion of position 1 of the beam centre. This is forwarded to the move builder.
+
+        @param pos1: the first position (this can be x in mm or for LARMOR and angle)
+        @return: the correctly scaled position
+        """
+        return self._move_builder.convert_pos1(pos1)
+
+    def convert_pos2(self, pos2):
+        """
+        Performs a conversion of position 2 of the beam centre. This is forwarded to the move builder.
+
+        @param pos2: the second position
+        @return: the correctly scaled position
+        """
+        return self._move_builder.convert_pos2(pos2)
diff --git a/scripts/test/SANS/CMakeLists.txt b/scripts/test/SANS/CMakeLists.txt
index 54e6432619097844410a4a521a8e4c60d535ebf3..6dbb49b1214150fd07969a9f3f71eb4f50059f0d 100644
--- a/scripts/test/SANS/CMakeLists.txt
+++ b/scripts/test/SANS/CMakeLists.txt
@@ -1,4 +1,4 @@
 add_subdirectory(common)
 add_subdirectory(state)
-
+add_subdirectory(user_file)
 
diff --git a/scripts/test/SANS/common/CMakeLists.txt b/scripts/test/SANS/common/CMakeLists.txt
index a701f70af35ec385310db87ef17a4aabfcf58896..35aa0a4d7ec8c310445dfd90f5a295a29d223624 100644
--- a/scripts/test/SANS/common/CMakeLists.txt
+++ b/scripts/test/SANS/common/CMakeLists.txt
@@ -12,6 +12,5 @@ set ( TEST_PY_FILES
 
 check_tests_valid ( ${CMAKE_CURRENT_SOURCE_DIR} ${TEST_PY_FILES} )
 
-# Prefix for test name=Python
 pyunittest_add_test ( ${CMAKE_CURRENT_SOURCE_DIR} PythonSANS ${TEST_PY_FILES} )
 
diff --git a/scripts/test/SANS/common/file_information_test.py b/scripts/test/SANS/common/file_information_test.py
index 24091f55df58455169d088adc36012ef7cd3bb3b..f66fc83f19551839a33aaec919d5c06d79b92f67 100644
--- a/scripts/test/SANS/common/file_information_test.py
+++ b/scripts/test/SANS/common/file_information_test.py
@@ -24,6 +24,7 @@ class SANSFileInformationTest(unittest.TestCase):
         self.assertTrue(file_information.get_type() == FileType.ISISNexus)
         self.assertTrue(file_information.get_run_number() == 22024)
         self.assertFalse(file_information.is_event_mode())
+        self.assertFalse(file_information.is_added_data())
 
     def test_that_can_extract_information_from_file_for_LOQ_single_period_and_raw_format(self):
         # Arrange
@@ -40,6 +41,61 @@ class SANSFileInformationTest(unittest.TestCase):
         self.assertTrue(file_information.get_instrument() == SANSInstrument.LOQ)
         self.assertTrue(file_information.get_type() == FileType.ISISRaw)
         self.assertTrue(file_information.get_run_number() == 48094)
+        self.assertFalse(file_information.is_added_data())
+
+    def test_that_can_extract_information_from_file_for_LARMOR_multi_period_event_and_nexus_format(self):
+        # Arrange
+        # The file is a multi period and event-based
+        file_name = "LARMOR00003368"
+        factory = SANSFileInformationFactory()
+
+        # Act
+        file_information = factory.create_sans_file_information(file_name)
+
+        # Assert
+        self.assertTrue(file_information.get_number_of_periods() == 4)
+        self.assertTrue(file_information.get_date() == DateAndTime("2015-06-05T14:43:49"))
+        self.assertTrue(file_information.get_instrument() == SANSInstrument.LARMOR)
+        self.assertTrue(file_information.get_type() == FileType.ISISNexus)
+        self.assertTrue(file_information.get_run_number() == 3368)
+        self.assertTrue(file_information.is_event_mode())
+        self.assertFalse(file_information.is_added_data())
+
+    def test_that_can_extract_information_for_added_histogram_data_and_nexus_format(self):
+        # Arrange
+        # The file is a single period, histogram-based and added
+        file_name = "SANS2D00022024-add"
+        factory = SANSFileInformationFactory()
+
+        # Act
+        file_information = factory.create_sans_file_information(file_name)
+
+        # Assert
+        self.assertTrue(file_information.get_number_of_periods() == 1)
+        self.assertTrue(file_information.get_date() == DateAndTime("2013-10-25T14:21:19"))
+        self.assertTrue(file_information.get_instrument() == SANSInstrument.SANS2D)
+        self.assertTrue(file_information.get_type() == FileType.ISISNexusAdded)
+        self.assertTrue(file_information.get_run_number() == 22024)
+        self.assertFalse(file_information.is_event_mode())
+        self.assertTrue(file_information.is_added_data())
+
+    def test_that_can_extract_information_for_LARMOR_added_event_data_and_multi_period_and_nexus_format(self):
+        # Arrange
+        # The file is a multi period, event-based and added
+        file_name = "LARMOR00013065-add"
+        factory = SANSFileInformationFactory()
+
+        # Act
+        file_information = factory.create_sans_file_information(file_name)
+
+        # Assert
+        self.assertTrue(file_information.get_number_of_periods() == 4)
+        self.assertTrue(file_information.get_date() == DateAndTime("2016-10-12T04:33:47"))
+        self.assertTrue(file_information.get_instrument() == SANSInstrument.LARMOR)
+        self.assertTrue(file_information.get_type() == FileType.ISISNexusAdded)
+        self.assertTrue(file_information.get_run_number() == 13065)
+        self.assertTrue(file_information.is_event_mode())
+        self.assertTrue(file_information.is_added_data())
 
 
 class SANSFileInformationGeneralFunctionsTest(unittest.TestCase):
diff --git a/scripts/test/SANS/common/xml_parsing_test.py b/scripts/test/SANS/common/xml_parsing_test.py
index 0492200d0fb60fd538700496c5cced1ed9a32617..0507989741d2d60de2c686b2f3cb69e80a11046d 100644
--- a/scripts/test/SANS/common/xml_parsing_test.py
+++ b/scripts/test/SANS/common/xml_parsing_test.py
@@ -2,8 +2,10 @@ from __future__ import (absolute_import, division, print_function)
 import unittest
 import mantid
 
+from mantid.kernel import DateAndTime
 from sans.common.file_information import (SANSFileInformationFactory, get_instrument_paths_for_sans_file)
-from sans.common.xml_parsing import (get_named_elements_from_ipf_file, get_monitor_names_from_idf_file)
+from sans.common.xml_parsing import (get_named_elements_from_ipf_file, get_monitor_names_from_idf_file,
+                                     get_valid_to_time_from_idf_string)
 
 
 class XMLParsingTest(unittest.TestCase):
@@ -60,6 +62,26 @@ class XMLParsingTest(unittest.TestCase):
         for key, value in list(results.items()):
             self.assertTrue(value == ("monitor"+str(key)))
 
+    def test_that_get_valid_to_date_from_idf_string(self):
+        # Arrange
+        idf_string = '<?xml version="1.0" encoding="UTF-8" ?>' \
+                     '<!-- For help on the notation used to specify an Instrument Definition File ' \
+                     'see http://www.mantidproject.org/IDF -->' \
+                     '<instrument xmlns="http://www.mantidproject.org/IDF/1.0" ' \
+                     '            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' \
+                     '            xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 http://schema.mantidproject.org/IDF/1.0/IDFSchema.xsd" ' \
+                     '            name="PEARL" valid-from   ="1900-01-31 23:59:59" ' \
+                     '            valid-to     ="2011-05-01 23:59:50" ' \
+                     '            last-modified="2008-09-17 05:00:00">' \
+                     '</instrument>'
+
+        # Act
+        extracted_time = get_valid_to_time_from_idf_string(idf_string)
+        # Assert
+        self.assertTrue(extracted_time == DateAndTime("2011-05-01 23:59:50"))
+
 
 if __name__ == '__main__':
     unittest.main()
+
+
diff --git a/scripts/test/SANS/state/CMakeLists.txt b/scripts/test/SANS/state/CMakeLists.txt
index d16f314ce579ef44ceafc1807e494d732c08b7bb..a9a1eaef13846fa1c9fdc58b9edc3ffb11749269 100644
--- a/scripts/test/SANS/state/CMakeLists.txt
+++ b/scripts/test/SANS/state/CMakeLists.txt
@@ -4,23 +4,24 @@
 
 set ( TEST_PY_FILES
    adjustment_test.py
+   state_base_test.py
    calculate_transmission_test.py
    convert_to_q_test.py
    data_test.py
+   state_functions_test.py
+   scale_test.py
    mask_test.py
    move_test.py
    normalize_to_monitor_test.py
    reduction_mode_test.py
-   save_test.py
-   scale_test.py
    slice_event_test.py
-   state_base_test.py
-   state_functions_test.py
+   save_test.py
    state_test.py
-   wavelength_and_pixel_adjustment_test.py
    wavelength_test.py
+   wavelength_and_pixel_adjustment_test.py
 )
+
 check_tests_valid ( ${CMAKE_CURRENT_SOURCE_DIR} ${TEST_PY_FILES} )
 
-# Prefix for test name=Python
-pyunittest_add_test ( ${CMAKE_CURRENT_SOURCE_DIR} PythonSANS ${TEST_PY_FILES} )
+# Prefix for test name=PythonAlgorithms
+pyunittest_add_test ( ${CMAKE_CURRENT_SOURCE_DIR} PythonAlgorithmsSANS ${TEST_PY_FILES} )
diff --git a/scripts/test/SANS/state/state_functions_test.py b/scripts/test/SANS/state/state_functions_test.py
index acfac65f524254b4ec59a361b4beaf9661eb6e0b..8b7aa5284499a729d149ea745a4f001502fa12fc 100644
--- a/scripts/test/SANS/state/state_functions_test.py
+++ b/scripts/test/SANS/state/state_functions_test.py
@@ -4,9 +4,9 @@ import mantid
 
 from mantid.api import AnalysisDataService
 from sans.state.state_functions import (get_output_workspace_name, is_pure_none_or_not_none, one_is_none,
-                                            validation_message, is_not_none_and_first_larger_than_second,
-                                            write_hash_into_reduced_can_workspace, get_reduced_can_workspace_from_ads)
-from test_director import TestDirector
+                                        validation_message, is_not_none_and_first_larger_than_second,
+                                        write_hash_into_reduced_can_workspace, get_reduced_can_workspace_from_ads)
+from sans.test_helper.test_director import TestDirector
 from sans.state.data import StateData
 from sans.common.enums import (ReductionDimensionality, ISISReductionMode, OutputParts)
 from sans.common.general_functions import create_unmanaged_algorithm
@@ -80,7 +80,7 @@ class StateFunctionsTest(unittest.TestCase):
         # Act
         output_workspace = get_output_workspace_name(state, ISISReductionMode.LAB)
         # Assert
-        self.assertTrue("12345rear_1D12.0_34.0Phi12.0_56.0_t4.57_T12.37" == output_workspace)
+        self.assertTrue("12345rear_1D_12.0_34.0Phi12.0_56.0_t4.57_T12.37" == output_workspace)
 
     def test_that_detects_if_all_entries_are_none_or_not_none_as_true(self):
         self.assertFalse(is_pure_none_or_not_none(["test", None, "test"]))
diff --git a/scripts/test/SANS/state/state_test.py b/scripts/test/SANS/state/state_test.py
index b1102b4a97a1a31001a35d3f3423762d9dcb175d..6a8a730a73cead06daa0433b90eb9d5ed5aa44de 100644
--- a/scripts/test/SANS/state/state_test.py
+++ b/scripts/test/SANS/state/state_test.py
@@ -18,7 +18,7 @@ from sans.state.adjustment import (StateAdjustment)
 from sans.state.convert_to_q import (StateConvertToQ)
 
 from state_test_helper import assert_validate_error, assert_raises_nothing
-
+from sans.common.enums import SANSInstrument
 
 # ----------------------------------------------------------------------------------------------------------------------
 #  State
@@ -105,6 +105,7 @@ class StateTest(unittest.TestCase):
                            "slice": MockStateSliceEvent(), "mask": MockStateMask(), "wavelength": MockStateWavelength(),
                            "save": MockStateSave(), "scale": MockStateScale(), "adjustment": MockStateAdjustment(),
                            "convert_to_q": MockStateConvertToQ()}
+        default_entries["data"].instrument = SANSInstrument.LARMOR
 
         for key, value in list(default_entries.items()):
             if key in entries:
diff --git a/scripts/test/SANS/user_file/CMakeLists.txt b/scripts/test/SANS/user_file/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..36343925914b3b4b8a567d150aae4a7dd4c6c002
--- /dev/null
+++ b/scripts/test/SANS/user_file/CMakeLists.txt
@@ -0,0 +1,14 @@
+#
+## Tests for the SANS user file parsing
+##
+
+set ( TEST_PY_FILES
+  user_file_parser_test.py
+  user_file_reader_test.py
+  user_file_state_director_test.py
+  )
+
+check_tests_valid ( ${CMAKE_CURRENT_SOURCE_DIR} ${TEST_PY_FILES} )
+
+# Prefix for test name=PythonAlgorithms
+pyunittest_add_test ( ${CMAKE_CURRENT_SOURCE_DIR} PythonAlgorithmsSANS ${TEST_PY_FILES} )
diff --git a/scripts/test/SANS/user_file/user_file_parser_test.py b/scripts/test/SANS/user_file/user_file_parser_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4a0ac4bfdb9f05a616ea877a3355b3c7839aee2
--- /dev/null
+++ b/scripts/test/SANS/user_file/user_file_parser_test.py
@@ -0,0 +1,1019 @@
+import unittest
+import mantid
+
+from sans.common.enums import (ISISReductionMode, DetectorType, RangeStepType, FitType, DataType)
+from sans.user_file.user_file_parser import (DetParser, LimitParser, MaskParser, SampleParser, SetParser, TransParser,
+                                             TubeCalibFileParser, QResolutionParser, FitParser, GravityParser,
+                                             MaskFileParser, MonParser, PrintParser, BackParser, SANS2DParser, LOQParser,
+                                             UserFileParser)
+from sans.user_file.user_file_common import (DetectorId, BackId, range_entry, back_single_monitor_entry,
+                                             single_entry_with_detector, mask_angle_entry, LimitsId, rebin_string_values,
+                                             simple_range, complex_range, MaskId, mask_block, mask_block_cross,
+                                             mask_line, range_entry_with_detector, SampleId, SetId, set_scales_entry,
+                                             position_entry, TransId, TubeCalibrationFileId, QResolutionId, FitId,
+                                             fit_general, MonId, monitor_length, monitor_file, GravityId,
+                                             monitor_spectrum, PrintId, det_fit_range)
+
+
+# -----------------------------------------------------------------
+# --- Free Helper Functions for Testing ---------------------------
+# -----------------------------------------------------------------
+def assert_valid_result(result, expected, assert_true):
+    keys_result = list(result.keys())
+    keys_expected = list(expected.keys())
+    assert_true(len(keys_expected) == len(keys_result))
+    for key in keys_result:
+        assert_true(key in keys_expected)
+        assert_true(result[key] == expected[key])
+
+
+def assert_valid_parse(parser, to_parse, expected, assert_true):
+    result = parser.parse_line(to_parse)
+    # Check the parsed result matches the expected dict (same keys and values)
+    assert_valid_result(result, expected, assert_true)
+
+
+def assert_invalid_parse(parser, to_parse, exception, assert_raises):
+    assert_raises(exception, parser.parse_line, to_parse)
+
+
+def do_test(parser, valid_settings, invalid_settings, assert_true, assert_raises):
+    for setting in valid_settings:
+        assert_valid_parse(parser, setting, valid_settings[setting], assert_true)
+
+    for setting in invalid_settings:
+        assert_invalid_parse(parser, setting, invalid_settings[setting], assert_raises)
+
+
+# # -----------------------------------------------------------------
+# # --- Tests -------------------------------------------------------
+# # -----------------------------------------------------------------
+class DetParserTest(unittest.TestCase):
+    def test_that_gets_type(self):
+        self.assertTrue(DetParser.get_type(), "DET")
+
+    def test_that_reduction_mode_is_parsed_correctly(self):
+        # The dict below has the string to parse as the key and the expected result as a value
+        valid_settings = {"DET/HAB": {DetectorId.reduction_mode: ISISReductionMode.HAB},
+                          "dEt/ frONT ": {DetectorId.reduction_mode: ISISReductionMode.HAB},
+                          "dET/REAR": {DetectorId.reduction_mode: ISISReductionMode.LAB},
+                          "dEt/MAIn   ": {DetectorId.reduction_mode: ISISReductionMode.LAB},
+                          " dEt/ BOtH": {DetectorId.reduction_mode: ISISReductionMode.All},
+                          "DeT /merge ": {DetectorId.reduction_mode: ISISReductionMode.Merged},
+                          " DEt / MERGED": {DetectorId.reduction_mode: ISISReductionMode.Merged}}
+
+        invalid_settings = {"DET/HUB": RuntimeError,
+                            "DET/HAB/": RuntimeError}
+        det_parser = DetParser()
+        do_test(det_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_merge_option_is_parsed_correctly(self):
+        valid_settings = {"DET/RESCALE 123": {DetectorId.rescale: 123},
+                          "dEt/ shiFt 48.5": {DetectorId.shift: 48.5},
+                          "dET/reSCale/FIT   23 34.6 ": {DetectorId.rescale_fit: det_fit_range(start=23, stop=34.6,
+                                                                                               use_fit=True)},
+                          "dEt/SHIFT/FIT 235.2  341   ": {DetectorId.shift_fit: det_fit_range(start=235.2, stop=341,
+                                                                                              use_fit=True)}}
+
+        invalid_settings = {"DET/Ruscale": RuntimeError,
+                            "DET/SHIFT/": RuntimeError,
+                            "DET/SHIFT 1 2": RuntimeError,
+                            "DET/SHIFT/FIT 1 ": RuntimeError,
+                            "DET/Rescale/FIT 1 2 4": RuntimeError}
+
+        det_parser = DetParser()
+        do_test(det_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_detector_setting_is_parsed_correctly(self):
+        valid_settings = {"Det/CORR/REAR/X 123": {DetectorId.correction_x: single_entry_with_detector(entry=123,
+                                                                    detector_type=DetectorType.LAB)},  # noqa
+                          "DEt/CORR/ frOnt/X +95.7": {DetectorId.correction_x:
+                                                          single_entry_with_detector(entry=95.7,
+                                                                                     detector_type=DetectorType.HAB)},
+                          "DeT/ CORR / ReAR/ y 12.3": {DetectorId.correction_y:
+                                                           single_entry_with_detector(entry=12.3,
+                                                                                      detector_type=DetectorType.LAB)},
+                          " DET/CoRR/fROnt/Y -957": {DetectorId.correction_y:
+                                                         single_entry_with_detector(entry=-957,
+                                                                                    detector_type=DetectorType.HAB)},
+                          "DeT/ CORR /reAR/Z 12.3": {DetectorId.correction_z:
+                                                         single_entry_with_detector(entry=12.3,
+                                                                                    detector_type=DetectorType.LAB)},
+                          " DET/CoRR/FRONT/ Z -957": {DetectorId.correction_z:
+                                                          single_entry_with_detector(entry=-957,
+                                                                                     detector_type=DetectorType.HAB)},
+                          "DeT/ CORR /reAR/SIDE 12.3": {DetectorId.correction_translation:
+                                                            single_entry_with_detector(entry=12.3,
+                                                                                       detector_type=DetectorType.LAB)},
+                          " DET/CoRR/FRONT/ SidE -957": {DetectorId.correction_translation:
+                                                             single_entry_with_detector(entry=-957,
+                                                                                    detector_type=DetectorType.HAB)},
+                          "DeT/ CORR /reAR/ROt 12.3": {DetectorId.correction_rotation:
+                                                           single_entry_with_detector(entry=12.3,
+                                                                                      detector_type=DetectorType.LAB)},
+                          " DET/CoRR/FRONT/ROT -957": {DetectorId.correction_rotation:
+                                                           single_entry_with_detector(entry=-957,
+                                                                                      detector_type=DetectorType.HAB)},
+                          "DeT/ CORR /reAR/Radius 12.3": {DetectorId.correction_radius:
+                                                              single_entry_with_detector(entry=12.3,
+                                                                                     detector_type=DetectorType.LAB)},
+                          " DET/CoRR/FRONT/RADIUS 957": {DetectorId.correction_radius:
+                                                             single_entry_with_detector(entry=957,
+                                                                                     detector_type=DetectorType.HAB)}}
+
+        invalid_settings = {"Det/CORR/REAR/X ": RuntimeError,
+                            "DEt/CORR/ frOnt/X 12 23": RuntimeError,
+                            " DET/CoRR/fROnt": RuntimeError,
+                            "DeT/ CORR /reAR/Z test": RuntimeError,
+                            " DET/CoRR/FRONT/ ZZ -957": RuntimeError,
+                            "DeT/ CORR /reAR/SIDE D 12.3": RuntimeError,
+                            " DET/CoRR/FRONT/ SidE -i3": RuntimeError}
+
+        det_parser = DetParser()
+        do_test(det_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class LimitParserTest(unittest.TestCase):
+    def test_that_gets_type(self):
+        self.assertTrue(LimitParser.get_type(), "L")
+
+    def test_that_angle_limit_is_parsed_correctly(self):
+        valid_settings = {"L/PhI 123   345.2": {LimitsId.angle: mask_angle_entry(min=123, max=345.2,
+                                                                                 use_mirror=True)},
+                          "L/PHI / NOMIRROR 123 -345.2": {LimitsId.angle: mask_angle_entry(min=123, max=-345.2,
+                                                                                           use_mirror=False)}}
+
+        invalid_settings = {"L/PHI/NMIRROR/ 23 454": RuntimeError,
+                            "L /pHI/ 23": RuntimeError,
+                            "L/PhI/ f f": RuntimeError,
+                            "L/ PHI/ f f": RuntimeError}
+
+        limit_parser = LimitParser()
+        do_test(limit_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_event_time_limit_is_parsed_correctly(self):
+        valid_settings = {"L  / EVEnTStime 0,-10,32,434,34523,35": {LimitsId.events_binning:
+                                                                    "0.0,-10.0,32.0,434.0,34523.0,35.0"}}
+
+        invalid_settings = {"L  / EEnTStime 0,-10,32,434,34523,35": RuntimeError,
+                            "L/EVENTSTIME 123g, sdf": RuntimeError,
+                            "L  /EvEnTStime": RuntimeError}
+        limit_parser = LimitParser()
+        do_test(limit_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_cut_limits_are_parsed_correctly(self):
+        valid_settings = {"L/Q/RCUT 234.4": {LimitsId.radius_cut: 234.4},
+                          "L /q / RcUT -234.34": {LimitsId.radius_cut: -234.34},
+                          "l/Q/WCUT 234.4": {LimitsId.wavelength_cut: 234.4},
+                          "L /q / wcUT -234.34": {LimitsId.wavelength_cut: -234.34}}
+
+        invalid_settings = {"L/Q/Rcu 123": RuntimeError,
+                            "L/Q/RCUT/ 2134": RuntimeError,
+                            "L/Q/Wcut 23 234": RuntimeError,
+                            "L/Q/WCUT": RuntimeError,
+                            "L / Q / WCUT234": RuntimeError}
+
+        limit_parser = LimitParser()
+        do_test(limit_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_radius_limits_are_parsed_correctly(self):
+        valid_settings = {"L/R 234 235": {LimitsId.radius: range_entry(start=234, stop=235)},
+                          "L / r   -234   235": {LimitsId.radius: range_entry(start=-234, stop=235)},
+                          "L / r   -234   235 454": {LimitsId.radius: range_entry(start=-234, stop=235)}
+                          }
+        invalid_settings = {"L/R/ 234 435": RuntimeError,
+                            "L/Rr 234 435": RuntimeError,
+                            "L/R 435": RuntimeError,
+                            "L/R sdf": RuntimeError,
+                            "L/R": RuntimeError}
+
+        limit_parser = LimitParser()
+        do_test(limit_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_q_limits_are_parsed_correctly(self):
+        valid_settings = {"L/Q 12 34": {LimitsId.q: simple_range(start=12, stop=34, step=None, step_type=None)},
+                          "L/Q 12 34 2.7": {LimitsId.q: simple_range(start=12, stop=34, step=2.7,
+                                                                     step_type=RangeStepType.Lin)},
+                          "L/Q -12 34.6 2.7/LOG": {LimitsId.q: simple_range(start=-12, stop=34.6, step=2.7,
+                                                                            step_type=RangeStepType.Log)},
+                          "L/q -12 3.6 2 /LIN": {LimitsId.q: simple_range(start=-12, stop=3.6, step=2,
+                                                                          step_type=RangeStepType.Lin)},
+                          "L/q -12 ,  0.4  ,23 ,-34.8, 3.6": {LimitsId.q: complex_range(start=-12, step1=0.4,
+                                                              mid=23, step2=34.8, stop=3.6,
+                                                              step_type1=RangeStepType.Lin,
+                                                              step_type2=RangeStepType.Log)},
+                          "L/q -12  , 0.4 , 23 ,-34.8 ,3.6 /LIn": {LimitsId.q: complex_range(start=-12, step1=0.4,
+                                                                   mid=23, step2=34.8, stop=3.6,
+                                                                   step_type1=RangeStepType.Lin,
+                                                                   step_type2=RangeStepType.Lin)},
+                          "L/q -12  , 0.4 , 23  ,34.8 ,3.6  /Log": {LimitsId.q: complex_range(start=-12,
+                                                                    step1=0.4, mid=23, step2=34.8, stop=3.6,
+                                                                    step_type1=RangeStepType.Log,
+                                                                    step_type2=RangeStepType.Log)}
+                          }
+
+        invalid_settings = {"L/Q 12 2 3 4": RuntimeError,
+                            "L/Q 12 2 3 4 23 3": RuntimeError,
+                            "L/Q 12 2 3 4 5/LUG": RuntimeError,
+                            "L/Q 12 2 /LIN": RuntimeError,
+                            "L/Q ": RuntimeError,
+                            "L/Q a 1 2 3 4 /LIN": RuntimeError}
+
+        limit_parser = LimitParser()
+        do_test(limit_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_qxy_limits_are_parsed_correctly(self):
+        valid_settings = {"L/QXY 12 34": {LimitsId.qxy: simple_range(start=12, stop=34, step=None,  step_type=None)},
+                          "L/QXY 12 34 2.7": {LimitsId.qxy: simple_range(start=12, stop=34, step=2.7,
+                                                                         step_type=RangeStepType.Lin)},
+                          "L/QXY -12 34.6 2.7/LOG": {LimitsId.qxy: simple_range(start=-12, stop=34.6, step=2.7,
+                                                                                step_type=RangeStepType.Log)},
+                          "L/qxY -12 3.6 2 /LIN": {LimitsId.qxy: simple_range(start=-12, stop=3.6, step=2,
+                                                                              step_type=RangeStepType.Lin)},
+                          "L/qxy -12  , 0.4,  23, -34.8, 3.6": {LimitsId.qxy: complex_range(start=-12, step1=0.4,
+                                                                mid=23, step2=34.8, stop=3.6,
+                                                                step_type1=RangeStepType.Lin,
+                                                                step_type2=RangeStepType.Log)},
+                          "L/qXY -12  , 0.4 , 23 ,34.8 ,3.6 /LIn": {LimitsId.qxy: complex_range(start=-12,
+                                                                    step1=0.4, mid=23, step2=34.8, stop=3.6,
+                                                                    step_type1=RangeStepType.Lin,
+                                                                    step_type2=RangeStepType.Lin)},
+                          "L/qXY -12   ,0.4,  23  ,34.8 ,3.6  /Log": {LimitsId.qxy: complex_range(start=-12,
+                                                                      step1=0.4, mid=23, step2=34.8, stop=3.6,
+                                                                      step_type1=RangeStepType.Log,
+                                                                      step_type2=RangeStepType.Log)}}
+
+        invalid_settings = {"L/QXY 12 2 3 4": RuntimeError,
+                            "L/QXY 12 2 3 4 23 3": RuntimeError,
+                            "L/QXY 12 2 3 4 5/LUG": RuntimeError,
+                            "L/QXY 12 2 /LIN": RuntimeError,
+                            "L/QXY ": RuntimeError,
+                            "L/QXY a 1 2 3 4 /LIN": RuntimeError}
+
+        limit_parser = LimitParser()
+        do_test(limit_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_wavelength_limits_are_parsed_correctly(self):
+        valid_settings = {"L/WAV 12 34": {LimitsId.wavelength: simple_range(start=12, stop=34, step=None,
+                                                                            step_type=None)},
+                          "L/waV 12 34 2.7": {LimitsId.wavelength: simple_range(start=12, stop=34, step=2.7,
+                                                                                step_type=RangeStepType.Lin)},
+                          "L/wAv -12 34.6 2.7/LOG": {LimitsId.wavelength: simple_range(start=-12, stop=34.6, step=2.7,
+                                                                                       step_type=RangeStepType.Log)},
+                          "L/WaV -12 3.6 2 /LIN": {LimitsId.wavelength: simple_range(start=-12, stop=3.6,  step=2,
+                                                                                     step_type=RangeStepType.Lin)}}
+
+        invalid_settings = {"L/WAV 12 2 3 4": RuntimeError,
+                            "L/WAV 12 2 3 4 23 3": RuntimeError,
+                            "L/WAV 12 2 3 4 5/LUG": RuntimeError,
+                            "L/WAV 12 2 /LIN": RuntimeError,
+                            "L/WAV ": RuntimeError,
+                            "L/WAV a 1 2 3 4 /LIN": RuntimeError}
+
+        limit_parser = LimitParser()
+        do_test(limit_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class MaskParserTest(unittest.TestCase):
+    def test_that_gets_type(self):
+        self.assertTrue(MaskParser.get_type(), "MASK")
+
+    def test_that_masked_line_is_parsed_correctly(self):
+        valid_settings = {"MASK/LiNE 12  23.6": {MaskId.line: mask_line(width=12, angle=23.6, x=None, y=None)},
+                          "MASK/LiNE 12  23.6 2 346": {MaskId.line: mask_line(width=12, angle=23.6, x=2, y=346)}
+                          }
+        invalid_settings = {"MASK/LiN 12 4": RuntimeError,
+                            "MASK/LINE 12": RuntimeError,
+                            "MASK/LINE 12 34 345 6 7": RuntimeError,
+                            "MASK/LINE ": RuntimeError,
+                            "MASK/LINE  x y": RuntimeError,
+                            }
+
+        mask_parser = MaskParser()
+        do_test(mask_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_masked_time_is_parsed_correctly(self):
+        valid_settings = {"MASK/TIME 23 35": {MaskId.time: range_entry_with_detector(start=23, stop=35,
+                                                                                     detector_type=None)},
+                          "MASK/T 23 35": {MaskId.time: range_entry_with_detector(start=23, stop=35,
+                                                                                  detector_type=None)},
+                          "MASK/REAR/T 13 35": {MaskId.time_detector: range_entry_with_detector(start=13, stop=35,
+                                                detector_type=DetectorType.LAB)},
+                          "MASK/FRONT/TIME 33 35": {MaskId.time_detector: range_entry_with_detector(start=33, stop=35,
+                                                    detector_type=DetectorType.HAB)}
+                          }
+
+        invalid_settings = {"MASK/TIME 12 34 4 ": RuntimeError,
+                            "MASK/T 2": RuntimeError,
+                            "MASK/T": RuntimeError,
+                            "MASK/T x y": RuntimeError,
+                            "MASK/REA/T 12 13": RuntimeError}
+
+        mask_parser = MaskParser()
+        do_test(mask_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_clear_mask_is_parsed_correctly(self):
+        valid_settings = {"MASK/CLEAR": {MaskId.clear_detector_mask: True},
+                          "MASK/CLeaR /TIMe": {MaskId.clear_time_mask: True}}
+
+        invalid_settings = {"MASK/CLEAR/TIME/test": RuntimeError,
+                            "MASK/CLEAR/TIIE": RuntimeError,
+                            "MASK/CLEAR test": RuntimeError}
+
+        mask_parser = MaskParser()
+        do_test(mask_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_single_spectrum_mask_is_parsed_correctly(self):
+        valid_settings = {"MASK S 12  ": {MaskId.single_spectrum_mask: 12},
+                          "MASK S234": {MaskId.single_spectrum_mask: 234}}
+
+        invalid_settings = {"MASK B 12  ": RuntimeError,
+                            "MASK S 12 23 ": RuntimeError}
+        mask_parser = MaskParser()
+        do_test(mask_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_single_spectrum_range_is_parsed_correctly(self):
+        valid_settings = {"MASK S 12 >  S23  ": {MaskId.spectrum_range_mask: range_entry(start=12, stop=23)},
+                          "MASK S234>S1234": {MaskId.spectrum_range_mask: range_entry(start=234, stop=1234)}}
+
+        invalid_settings = {"MASK S 12> S123.5  ": RuntimeError,
+                            "MASK S 12> 23 ": RuntimeError}
+        mask_parser = MaskParser()
+        do_test(mask_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_single_vertical_strip_mask_is_parsed_correctly(self):
+        valid_settings = {"MASK V 12  ": {MaskId.vertical_single_strip_mask: single_entry_with_detector(entry=12,
+                                          detector_type=DetectorType.LAB)},
+                          "MASK / Rear V  12  ": {MaskId.vertical_single_strip_mask: single_entry_with_detector(
+                                                  entry=12, detector_type=DetectorType.LAB)},
+                          "MASK/mAin V234": {MaskId.vertical_single_strip_mask: single_entry_with_detector(entry=234,
+                                             detector_type=DetectorType.LAB)},
+                          "MASK / LaB V  234": {MaskId.vertical_single_strip_mask: single_entry_with_detector(entry=234,
+                                                detector_type=DetectorType.LAB)},
+                          "MASK /frOnt V  12  ": {MaskId.vertical_single_strip_mask: single_entry_with_detector(
+                                                  entry=12, detector_type=DetectorType.HAB)},
+                          "MASK/HAB V234": {MaskId.vertical_single_strip_mask:  single_entry_with_detector(entry=234,
+                                            detector_type=DetectorType.HAB)}}
+
+        invalid_settings = {"MASK B 12  ": RuntimeError,
+                            "MASK V 12 23 ": RuntimeError,
+                            "MASK \Rear V3": RuntimeError}
+        mask_parser = MaskParser()
+        do_test(mask_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_range_vertical_strip_mask_is_parsed_correctly(self):
+        # "MASK [/<detector>] V<a>>V<b>" -> MaskId.vertical_range_strip_mask.
+        # Detector qualifiers are case-insensitive: Rear/mAin/LaB select LAB,
+        # frOnt/HAB select HAB, and the qualifier defaults to LAB when absent.
+        valid_settings = {"MASK V  12 >  V23  ": {MaskId.vertical_range_strip_mask: range_entry_with_detector(start=12,
+                                                  stop=23, detector_type=DetectorType.LAB)},
+                          "MASK V123>V234": {MaskId.vertical_range_strip_mask: range_entry_with_detector(start=123,
+                                             stop=234, detector_type=DetectorType.LAB)},
+                          "MASK / Rear V123>V234": {MaskId.vertical_range_strip_mask:  range_entry_with_detector(
+                                                    start=123, stop=234, detector_type=DetectorType.LAB)},
+                          "MASK/mAin  V123>V234": {MaskId.vertical_range_strip_mask: range_entry_with_detector(
+                                                   start=123, stop=234, detector_type=DetectorType.LAB)},
+                          "MASK / LaB V123>V234": {MaskId.vertical_range_strip_mask: range_entry_with_detector(
+                                                   start=123, stop=234, detector_type=DetectorType.LAB)},
+                          "MASK/frOnt V123>V234": {MaskId.vertical_range_strip_mask: range_entry_with_detector(
+                                                   start=123, stop=234, detector_type=DetectorType.HAB)},
+                          "MASK/HAB V123>V234": {MaskId.vertical_range_strip_mask: range_entry_with_detector(
+                                                 start=123, stop=234, detector_type=DetectorType.HAB)}}
+
+        # Rejected inputs: non-integer strip bound, missing '>' separator, and a
+        # stray '/' after the detector qualifier.
+        invalid_settings = {"MASK V 12> V123.5  ": RuntimeError,
+                            "MASK V 12 23 ": RuntimeError,
+                            "MASK /Rear/ V12>V34": RuntimeError}
+        mask_parser = MaskParser()
+        do_test(mask_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_single_horizontal_strip_mask_is_parsed_correctly(self):
+        # "MASK [/<detector>] H<n>" -> MaskId.horizontal_single_strip_mask, with
+        # the same case-insensitive detector qualifiers as the vertical variant
+        # (default LAB; frOnt/HAB -> HAB).
+        valid_settings = {"MASK H 12  ": {MaskId.horizontal_single_strip_mask: single_entry_with_detector(entry=12,
+                                          detector_type=DetectorType.LAB)},
+                          "MASK / Rear H  12  ": {MaskId.horizontal_single_strip_mask: single_entry_with_detector(
+                                                  entry=12, detector_type=DetectorType.LAB)},
+                          "MASK/mAin H234": {MaskId.horizontal_single_strip_mask: single_entry_with_detector(entry=234,
+                                             detector_type=DetectorType.LAB)},
+                          "MASK / LaB H  234": {MaskId.horizontal_single_strip_mask: single_entry_with_detector(
+                                                entry=234, detector_type=DetectorType.LAB)},
+                          "MASK /frOnt H  12  ": {MaskId.horizontal_single_strip_mask: single_entry_with_detector(
+                                                  entry=12, detector_type=DetectorType.HAB)},
+                          "MASK/HAB H234": {MaskId.horizontal_single_strip_mask: single_entry_with_detector(entry=234,
+                                            detector_type=DetectorType.HAB)}}
+
+        # Rejected inputs: '/' between H and the number, two numbers after H,
+        # and a backslash used instead of '/' before the detector name.
+        invalid_settings = {"MASK H/12  ": RuntimeError,
+                            "MASK H 12 23 ": RuntimeError,
+                            "MASK \Rear H3": RuntimeError}
+        mask_parser = MaskParser()
+        do_test(mask_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_range_horizontal_strip_mask_is_parsed_correctly(self):
+        # "MASK [/<detector>] H<a>>H<b>" -> MaskId.horizontal_range_strip_mask;
+        # mirrors the vertical-range test above for the H axis.
+        valid_settings = {"MASK H  12 >  H23  ": {MaskId.horizontal_range_strip_mask: range_entry_with_detector(
+                                                  start=12, stop=23, detector_type=DetectorType.LAB)},
+                          "MASK H123>H234": {MaskId.horizontal_range_strip_mask: range_entry_with_detector(
+                                             start=123, stop=234, detector_type=DetectorType.LAB)},
+                          "MASK / Rear H123>H234": {MaskId.horizontal_range_strip_mask: range_entry_with_detector(
+                                                    start=123, stop=234, detector_type=DetectorType.LAB)},
+                          "MASK/mAin H123>H234": {MaskId.horizontal_range_strip_mask: range_entry_with_detector(
+                                                  start=123, stop=234, detector_type=DetectorType.LAB)},
+                          "MASK / LaB H123>H234": {MaskId.horizontal_range_strip_mask: range_entry_with_detector(
+                                                   start=123, stop=234, detector_type=DetectorType.LAB)},
+                          "MASK/frOnt H123>H234": {MaskId.horizontal_range_strip_mask: range_entry_with_detector(
+                                                   start=123, stop=234, detector_type=DetectorType.HAB)},
+                          "MASK/HAB H123>H234": {MaskId.horizontal_range_strip_mask:  range_entry_with_detector(
+                                                 start=123, stop=234, detector_type=DetectorType.HAB)}}
+
+        # Rejected inputs: non-integer bound, missing '>' separator, and an
+        # H>V mixed range.
+        invalid_settings = {"MASK H 12> H123.5  ": RuntimeError,
+                            "MASK H 12 23 ": RuntimeError,
+                            "MASK /Rear/ H12>V34": RuntimeError}
+        mask_parser = MaskParser()
+        do_test(mask_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_block_mask_is_parsed_correctly(self):
+        # Two range strips joined by '+' form a rectangular block
+        # (MaskId.block); two single strips joined by '+' form a cross
+        # (MaskId.block_cross). Note the V...+H... ordering is normalised, so
+        # horizontal1/2 always carry the H values regardless of input order.
+        valid_settings = {"MASK H12>H23 + V14>V15 ": {MaskId.block: mask_block(horizontal1=12, horizontal2=23,
+                                                                               vertical1=14, vertical2=15,
+                                                                               detector_type=DetectorType.LAB)},
+                          "MASK/ HAB H12>H23 + V14>V15 ": {MaskId.block: mask_block(horizontal1=12, horizontal2=23,
+                                                                                    vertical1=14, vertical2=15,
+                                                                                    detector_type=DetectorType.HAB)},
+                          "MASK/ HAB V12>V23 + H14>H15 ": {MaskId.block: mask_block(horizontal1=14, horizontal2=15,
+                                                                                    vertical1=12, vertical2=23,
+                                                                                    detector_type=DetectorType.HAB)},
+                          "MASK  V12 + H 14": {MaskId.block_cross: mask_block_cross(horizontal=14, vertical=12,
+                                                                                    detector_type=DetectorType.LAB)},
+                          "MASK/HAB H12 + V 14": {MaskId.block_cross: mask_block_cross(horizontal=12, vertical=14,
+                                                                                       detector_type=DetectorType.HAB)}}
+
+        # Rejected inputs: three '+'-joined parts, two strips on the same axis,
+        # and a single strip mixed with a range strip.
+        invalid_settings = {"MASK H12>H23 + V14 + V15 ": RuntimeError,
+                            "MASK H12 + H15 ": RuntimeError,
+                            "MASK/ HAB V12 + H14>H15 ": RuntimeError}
+        mask_parser = MaskParser()
+        do_test(mask_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class SampleParserTest(unittest.TestCase):
+    """Tests for the SAMPLE user-file command parser (PATH on/off, Offset)."""
+    def test_that_gets_type(self):
+        # NOTE(review): assertTrue's second argument is only the failure
+        # message, so no comparison with "SAMPLE" actually happens here;
+        # assertEqual(SampleParser.get_type(), "SAMPLE") was likely intended.
+        self.assertTrue(SampleParser.get_type(), "SAMPLE")
+
+    def test_that_setting_sample_path_is_parsed_correctly(self):
+        # "SAMPLE /PATH/ON|OFF" toggles SampleId.path; tokens are
+        # case-insensitive and tolerate surrounding whitespace.
+        valid_settings = {"SAMPLE /PATH/ON": {SampleId.path: True},
+                          "SAMPLE / PATH / OfF": {SampleId.path: False}}
+
+        # Rejected: missing '/' before ON, missing ON/OFF token, bad token.
+        invalid_settings = {"SAMPLE/PATH ON": RuntimeError,
+                            "SAMPLE /pATh ": RuntimeError,
+                            "SAMPLE/ Path ONN": RuntimeError}
+
+        sample_parser = SampleParser()
+        do_test(sample_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_setting_sample_offset_is_parsed_correctly(self):
+        # "SAMPLE /Offset <float>" -> SampleId.offset.
+        valid_settings = {"SAMPLE /Offset 234.5": {SampleId.offset: 234.5},
+                          "SAMPLE / Offset 25": {SampleId.offset: 25}}
+
+        # Rejected: misspelt command, '/' after Offset, two numbers.
+        invalid_settings = {"SAMPL/offset fg": RuntimeError,
+                            "SAMPLE /Offset/ 23 ": RuntimeError,
+                            "SAMPLE/ offset 234 34": RuntimeError}
+
+        sample_parser = SampleParser()
+        do_test(sample_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class SetParserTest(unittest.TestCase):
+    """Tests for the SET user-file command parser (scales, centre)."""
+    def test_that_gets_type(self):
+        # NOTE(review): assertTrue(x, msg) does not compare against "SET";
+        # assertEqual was likely intended.
+        self.assertTrue(SetParser.get_type(), "SET")
+
+    def test_that_setting_scales_is_parsed_correctly(self):
+        # "SET scales s a b c d" requires exactly five numbers.
+        valid_settings = {"SET  scales 2 5 4    7 8": {SetId.scales: set_scales_entry(s=2, a=5, b=4, c=7, d=8)}}
+
+        # Rejected: six numbers, or none at all.
+        invalid_settings = {"SET scales 2 4 6 7 8 9": RuntimeError,
+                            "SET scales ": RuntimeError}
+
+        set_parser = SetParser()
+        do_test(set_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_centre_is_parsed_correctly(self):
+        # "SET centre [/<detector>] x y": detector qualifier is optional and
+        # case-insensitive (main/lAb -> LAB, hAb/FRONT -> HAB, default LAB).
+        valid_settings = {"SET centre 23 45": {SetId.centre: position_entry(pos1=23, pos2=45,
+                                                                            detector_type=DetectorType.LAB)},
+                          "SET centre /main 23 45": {SetId.centre: position_entry(pos1=23, pos2=45,
+                                                                                  detector_type=DetectorType.LAB)},
+                          "SET centre / lAb 23 45": {SetId.centre: position_entry(pos1=23, pos2=45,
+                                                                                  detector_type=DetectorType.LAB)},
+                          "SET centre / hAb 23 45": {SetId.centre: position_entry(pos1=23, pos2=45,
+                                                                                  detector_type=DetectorType.HAB)},
+                          "SET centre /FRONT 23 45": {SetId.centre: position_entry(pos1=23, pos2=45,
+                                                      detector_type=DetectorType.HAB)}}
+
+        # Rejected: wrong coordinate count and a trailing '/' after the
+        # detector qualifier.
+        invalid_settings = {"SET centre 23": RuntimeError,
+                            "SEt centre 34 34 34": RuntimeError,
+                            "SEt centre/MAIN/ 34 34": RuntimeError,
+                            "SEt centre/MAIN": RuntimeError}
+
+        set_parser = SetParser()
+        do_test(set_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class TransParserTest(unittest.TestCase):
+    """Tests for the TRANS user-file command parser (transmission settings)."""
+    def test_that_gets_type(self):
+        # NOTE(review): assertTrue(x, msg) does not compare against "TRANS";
+        # assertEqual was likely intended.
+        self.assertTrue(TransParser.get_type(), "TRANS")
+
+    def test_that_trans_spec_is_parsed_correctly(self):
+        # "TRANS/TRANSPEC=<int>" -> TransId.spec; requires '=', an integer
+        # value, and the exact keyword spelling.
+        valid_settings = {"TRANS/TRANSPEC=23": {TransId.spec: 23},
+                          "TRANS / TransPEC =  23": {TransId.spec: 23}}
+
+        invalid_settings = {"TRANS/TRANSPEC 23": RuntimeError,
+                            "TRANS/TRANSPEC/23": RuntimeError,
+                            "TRANS/TRANSPEC=23.5": RuntimeError,
+                            "TRANS/TRANSPEC=2t": RuntimeError,
+                            "TRANS/TRANSSPEC=23": RuntimeError}
+
+        trans_parser = TransParser()
+        do_test(trans_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_trans_spec_shift_is_parsed_correctly(self):
+        # "TRANS/TRANSPEC=4/SHIFT=<n>" yields both TransId.spec_shift and
+        # TransId.spec. Per the first invalid case, SHIFT is only accepted
+        # together with TRANSPEC=4.
+        valid_settings = {"TRANS/TRANSPEC=4/SHIFT=23": {TransId.spec_shift: 23, TransId.spec: 4},
+                          "TRANS/TRANSPEC =4/ SHIFT = 23": {TransId.spec_shift: 23, TransId.spec: 4}}
+
+        invalid_settings = {"TRANS/TRANSPEC=6/SHIFT=23": RuntimeError,
+                            "TRANS/TRANSPEC=4/SHIFT/23": RuntimeError,
+                            "TRANS/TRANSPEC=4/SHIFT 23": RuntimeError,
+                            "TRANS/TRANSPEC/SHIFT=23": RuntimeError,
+                            "TRANS/TRANSPEC=6/SHIFT=t": RuntimeError}
+
+        trans_parser = TransParser()
+        do_test(trans_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_radius_is_parsed_correctly(self):
+        # "TRANS/RADIUS=<float>" -> TransId.radius.
+        valid_settings = {"TRANS / radius  =23": {TransId.radius: 23},
+                          "TRANS /RADIUS= 245.7": {TransId.radius: 245.7}}
+        invalid_settings = {}
+
+        trans_parser = TransParser()
+        do_test(trans_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_roi_is_parsed_correctly(self):
+        # "TRANS/ROI=<file.xml>[,...]" -> TransId.roi as a list of file names;
+        # only .xml files are accepted and names must not contain spaces.
+        valid_settings = {"TRANS/ROI =testFile.xml": {TransId.roi: ["testFile.xml"]},
+                          "TRANS/ROI =testFile.xml, "
+                          "TestFile2.XmL,testFile4.xml": {TransId.roi: ["testFile.xml", "TestFile2.XmL",
+                                                                        "testFile4.xml"]}}
+        invalid_settings = {"TRANS/ROI =t estFile.xml": RuntimeError,
+                            "TRANS/ROI =testFile.txt": RuntimeError,
+                            "TRANS/ROI testFile.txt": RuntimeError,
+                            "TRANS/ROI=": RuntimeError}
+
+        trans_parser = TransParser()
+        do_test(trans_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_mask_is_parsed_correctly(self):
+        # "TRANS/MASK=<file.xml>[,...]" -> TransId.mask; same constraints as
+        # the ROI variant above.
+        valid_settings = {"TRANS/Mask =testFile.xml": {TransId.mask: ["testFile.xml"]},
+                          "TRANS/ MASK =testFile.xml, "
+                          "TestFile2.XmL,testFile4.xml": {TransId.mask: ["testFile.xml", "TestFile2.XmL",
+                                                                         "testFile4.xml"]}}
+        invalid_settings = {"TRANS/MASK =t estFile.xml": RuntimeError,
+                            "TRANS/  MASK =testFile.txt": RuntimeError,
+                            "TRANS/ MASK testFile.txt": RuntimeError,
+                            "TRANS/MASK=": RuntimeError}
+
+        trans_parser = TransParser()
+        do_test(trans_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_workspaces_are_parsed_correctly(self):
+        # "TRANS/SampleWS=<name>" / "TRANS/CanWS=<name>" select the sample and
+        # can transmission workspaces respectively.
+        valid_settings = {"TRANS/SampleWS =testworksaoe234Name": {TransId.sample_workspace: "testworksaoe234Name"},
+                          "TRANS/ SampleWS = testworksaoe234Name": {TransId.sample_workspace: "testworksaoe234Name"},
+                          "TRANS/ CanWS =testworksaoe234Name": {TransId.can_workspace: "testworksaoe234Name"},
+                          "TRANS/ CANWS = testworksaoe234Name": {TransId.can_workspace: "testworksaoe234Name"}}
+        invalid_settings = {"TRANS/CANWS/ test": RuntimeError,
+                            "TRANS/SAMPLEWS =": RuntimeError}
+
+        trans_parser = TransParser()
+        do_test(trans_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class TubeCalibFileParserTest(unittest.TestCase):
+    """Tests for the TUBECALIBFILE user-file command parser."""
+    def test_that_gets_type(self):
+        # NOTE(review): assertTrue(x, msg) performs no comparison, and the
+        # "TRANS" message looks copy-pasted from TransParserTest; presumably
+        # assertEqual against the tube-calibration type string was intended
+        # -- TODO confirm the expected value of TubeCalibFileParser.get_type().
+        self.assertTrue(TubeCalibFileParser.get_type(), "TRANS")
+
+    def test_that_tube_calibration_file_is_parsed_correctly(self):
+        # "TUBECALIBFILE=<file.nxs>" -> TubeCalibrationFileId.file; only .nxs
+        # files are accepted and '=' is mandatory.
+        valid_settings = {"TUBECALIbfile= calib_file.nxs": {TubeCalibrationFileId.file: "calib_file.nxs"},
+                          " tUBECALIBfile=  caAlib_file.Nxs": {TubeCalibrationFileId.file: "caAlib_file.Nxs"}}
+
+        invalid_settings = {"TUBECALIFILE file.nxs": RuntimeError,
+                            "TUBECALIBFILE=file.txt": RuntimeError,
+                            "TUBECALIBFILE=file": RuntimeError}
+
+        tube_calib_file_parser = TubeCalibFileParser()
+        do_test(tube_calib_file_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class QResolutionParserTest(unittest.TestCase):
+    """Tests for the QRESOL user-file command parser (Q resolution setup)."""
+    def test_that_gets_type(self):
+        # NOTE(review): assertTrue(x, msg) does not compare against "QRESOL";
+        # assertEqual was likely intended.
+        self.assertTrue(QResolutionParser.get_type(), "QRESOL")
+
+    def test_that_q_resolution_on_off_is_parsed_correctly(self):
+        # "QRESOL/ON|OFF" toggles QResolutionId.on; '=' instead of '/' fails.
+        valid_settings = {"QRESOL/ON": {QResolutionId.on: True},
+                          "QREsoL / oFF": {QResolutionId.on: False}}
+
+        invalid_settings = {"QRESOL= ON": RuntimeError}
+
+        q_resolution_parser = QResolutionParser()
+        do_test(q_resolution_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_q_resolution_float_values_are_parsed_correctly(self):
+        # Each "QRESOL/<key> = <float>" pair maps onto the matching
+        # QResolutionId member (deltaR, Lcollim, a1/a2, h1/h2, w1/w2).
+        valid_settings = {"QRESOL/deltaR = 23.546": {QResolutionId.delta_r: 23.546},
+                          "QRESOL/ Lcollim = 23.546": {QResolutionId.collimation_length: 23.546},
+                          "QRESOL/ a1 = 23.546": {QResolutionId.a1: 23.546},
+                          "QRESOL/ a2 =  23": {QResolutionId.a2: 23},
+                          "QRESOL /  H1 = 23.546 ": {QResolutionId.h1: 23.546},
+                          "QRESOL /h2 = 23.546 ": {QResolutionId.h2: 23.546},
+                          "QRESOL /  W1 = 23.546 ": {QResolutionId.w1: 23.546},
+                          "QRESOL /W2 = 23.546 ": {QResolutionId.w2: 23.546}
+                          }
+
+        # Rejected: missing '=', non-numeric value, unknown key B1.
+        invalid_settings = {"QRESOL/DELTAR 23": RuntimeError,
+                            "QRESOL /DELTAR = test": RuntimeError,
+                            "QRESOL /A1 t": RuntimeError,
+                            "QRESOL/B1=10": RuntimeError}
+
+        q_resolution_parser = QResolutionParser()
+        do_test(q_resolution_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_moderator_is_parsed_correctly(self):
+        # "QRESOL/MODERATOR = <file.txt>" -> QResolutionId.moderator; only
+        # .txt files are accepted.
+        valid_settings = {"QRESOL/MODERATOR = test_file.txt": {QResolutionId.moderator: "test_file.txt"}}
+
+        invalid_settings = {"QRESOL/MODERATOR = test_file.nxs": RuntimeError,
+                            "QRESOL/MODERATOR/test_file.txt": RuntimeError,
+                            "QRESOL/MODERATOR=test_filetxt": RuntimeError}
+
+        q_resolution_parser = QResolutionParser()
+        do_test(q_resolution_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class FitParserTest(unittest.TestCase):
+    """Tests for the FIT user-file command parser (transmission fits, monitor times)."""
+    def test_that_gets_type(self):
+        # NOTE(review): assertTrue(x, msg) does not compare against "FIT";
+        # assertEqual was likely intended.
+        self.assertTrue(FitParser.get_type(), "FIT")
+
+    def test_that_trans_clear_is_parsed_correctly(self):
+        # Both "FIT/TRANS/CLEAR" and "FIT/TRANS/OFF" map to FitId.clear: True.
+        valid_settings = {"FIT/ trans / clear": {FitId.clear: True},
+                          "FIT/traNS /ofF": {FitId.clear: True}}
+
+        invalid_settings = {"FIT/  clear": RuntimeError,
+                            "FIT/MONITOR/OFF": RuntimeError}
+
+        fit_parser = FitParser()
+        do_test(fit_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_general_fit_is_parsed_correctly(self):
+        # "FIT/TRANS[/<data>]/<type> [order] [start stop]":
+        #   LIN/LINEAR/STRAIGHT -> FitType.Linear, LOG/YLOG -> FitType.Log,
+        #   POLYNOMIAL [order] -> FitType.Polynomial (order defaults to 2);
+        #   optional SAMPLE/CAN selects the data type (None when absent);
+        #   start/stop are optional as a pair (None when absent).
+        valid_settings = {"FIT/ trans / LIN 123 3556": {FitId.general: fit_general(start=123, stop=3556,
+                                                        fit_type=FitType.Linear, data_type=None, polynomial_order=0)},
+                          "FIT/ tranS/linear 123 3556": {FitId.general: fit_general(start=123, stop=3556,
+                                                         fit_type=FitType.Linear, data_type=None, polynomial_order=0)},
+                          "FIT/TRANS/Straight 123 3556": {FitId.general: fit_general(start=123, stop=3556,
+                                                          fit_type=FitType.Linear, data_type=None, polynomial_order=0)},
+                          "FIT/ tranS/LoG 123  3556.6 ": {FitId.general: fit_general(start=123, stop=3556.6,
+                                                          fit_type=FitType.Log, data_type=None, polynomial_order=0)},
+                          "FIT/TRANS/  YlOG 123   3556": {FitId.general: fit_general(start=123, stop=3556,
+                                                          fit_type=FitType.Log, data_type=None, polynomial_order=0)},
+                          "FIT/Trans/Lin": {FitId.general: fit_general(start=None, stop=None, fit_type=FitType.Linear,
+                                                                       data_type=None, polynomial_order=0)},
+                          "FIT/Trans/ Log": {FitId.general: fit_general(start=None, stop=None, fit_type=FitType.Log,
+                                                                        data_type=None, polynomial_order=0)},
+                          "FIT/Trans/ polYnomial": {FitId.general: fit_general(start=None, stop=None,
+                                                    fit_type=FitType.Polynomial, data_type=None, polynomial_order=2)},
+                          "FIT/Trans/ polYnomial 3": {FitId.general: fit_general(start=None, stop=None,
+                                                                                 fit_type=FitType.Polynomial,
+                                                                                 data_type=None, polynomial_order=3)},
+                          "FIT/Trans/Sample/Log 23.4 56.7": {FitId.general: fit_general(start=23.4, stop=56.7,
+                                                             fit_type=FitType.Log, data_type=DataType.Sample,
+                                                                                        polynomial_order=0)},
+                          "FIT/Trans/can/ lIn 23.4 56.7": {FitId.general: fit_general(start=23.4, stop=56.7,
+                                                           fit_type=FitType.Linear, data_type=DataType.Can,
+                                                                                      polynomial_order=0)},
+                          "FIT/Trans / can/polynomiAL 5 23 45": {FitId.general: fit_general(start=23, stop=45,
+                                                                 fit_type=FitType.Polynomial, data_type=DataType.Can,
+                                                                                            polynomial_order=5)},
+                          }
+
+        # Rejected: a lone start value, three numbers, non-numeric bounds, a
+        # polynomial order without range where disallowed, and a bare
+        # "FIT/Trans /" with no fit type.
+        invalid_settings = {"FIT/TRANS/ YlOG 123": RuntimeError,
+                            "FIT/TRANS/ YlOG 123 34 34": RuntimeError,
+                            "FIT/TRANS/ YlOG 123 fg": RuntimeError,
+                            "FIT/Trans / can/polynomiAL 6": RuntimeError,
+                            "FIT/Trans /": RuntimeError,
+                            "FIT/Trans / Lin 23": RuntimeError,
+                            "FIT/Trans / lin 23 5 6": RuntimeError,
+                            "FIT/Trans / lin 23 t": RuntimeError}
+
+        fit_parser = FitParser()
+        do_test(fit_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_monitor_times_are_parsed_correctly(self):
+        # "FIT/MONITOR <start> <stop>" -> FitId.monitor_times range.
+        valid_settings = {"FIT/monitor 12 34.5": {FitId.monitor_times: range_entry(start=12, stop=34.5)},
+                          "Fit / Monitor 12.6 34.5": {FitId.monitor_times: range_entry(start=12.6, stop=34.5)}}
+
+        invalid_settings = {"Fit / Monitor 12.6 34 34": RuntimeError,
+                            "Fit / Monitor": RuntimeError}
+
+        fit_parser = FitParser()
+        do_test(fit_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class GravityParserTest(unittest.TestCase):
+    """Tests for the GRAVITY user-file command parser (on/off, extra length)."""
+    def test_that_gets_type(self):
+        # NOTE(review): assertTrue(x, msg) does not compare against "GRAVITY";
+        # assertEqual was likely intended.
+        self.assertTrue(GravityParser.get_type(), "GRAVITY")
+
+    def test_that_gravity_on_off_is_parsed_correctly(self):
+        # "Gravity ON|OFF" -> GravityId.on_off boolean.
+        valid_settings = {"Gravity on ": {GravityId.on_off: True},
+                          "Gravity   OFF ": {GravityId.on_off: False}}
+
+        invalid_settings = {"Gravity ": RuntimeError,
+                            "Gravity ONN": RuntimeError}
+
+        gravity_parser = GravityParser()
+        do_test(gravity_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_gravity_extra_length_is_parsed_correctly(self):
+        # "Gravity/LExtra [=] <float>" -> GravityId.extra_length; the '=' is
+        # optional (third valid case).
+        valid_settings = {"Gravity/LExtra =23.5": {GravityId.extra_length: 23.5},
+                          "Gravity  / lExtra =  23.5": {GravityId.extra_length: 23.5},
+                          "Gravity  / lExtra  23.5": {GravityId.extra_length: 23.5}}
+
+        invalid_settings = {"Gravity/LExtra - 23.5": RuntimeError,
+                            "Gravity/LExtra =tw": RuntimeError}
+
+        gravity_parser = GravityParser()
+        do_test(gravity_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class MaskFileParserTest(unittest.TestCase):
+    """Tests for the MASKFILE user-file command parser."""
+    def test_that_gets_type(self):
+        # NOTE(review): assertTrue(x, msg) does not compare against "MASKFILE";
+        # assertEqual was likely intended.
+        self.assertTrue(MaskFileParser.get_type(), "MASKFILE")
+
+    def test_that_gravity_on_off_is_parsed_correctly(self):
+        # NOTE(review): method name looks copy-pasted from GravityParserTest --
+        # this actually tests "MaskFile=<file.xml>[,...]" -> MaskId.file list.
+        # Only .xml files are accepted and '=' is mandatory.
+        valid_settings = {"MaskFile= test.xml,   testKsdk2.xml,tesetlskd.xml":
+                          {MaskId.file: ["test.xml", "testKsdk2.xml", "tesetlskd.xml"]}}
+
+        invalid_settings = {"MaskFile=": RuntimeError,
+                            "MaskFile=test.txt": RuntimeError,
+                            "MaskFile test.xml, test2.xml": RuntimeError}
+
+        mask_file_parser = MaskFileParser()
+        do_test(mask_file_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class MonParserTest(unittest.TestCase):
+    """Tests for the MON user-file command parser (length, direct/flat/hab files, spectrum)."""
+    def test_that_gets_type(self):
+        # NOTE(review): assertTrue(x, msg) does not compare against "MON";
+        # assertEqual was likely intended.
+        self.assertTrue(MonParser.get_type(), "MON")
+
+    def test_that_length_is_parsed_correctly(self):
+        # "MON/LENGTH= <float> <int-spectrum> [/INTERPOLATE]" -> MonId.length;
+        # the spectrum number must be an integer.
+        valid_settings = {"MON/length= 23.5 34": {MonId.length: monitor_length(length=23.5, spectrum=34,
+                                                                               interpolate=False)},
+                          "MON/length= 23.5 34  / InterPolate": {MonId.length: monitor_length(length=23.5, spectrum=34,
+                                                                                              interpolate=True)}}
+
+        invalid_settings = {"MON/length= 23.5 34.7": RuntimeError,
+                            "MON/length 23.5 34": RuntimeError,
+                            "MON/length=23.5": RuntimeError,
+                            "MON/length/23.5 34": RuntimeError}
+
+        mon_parser = MonParser()
+        do_test(mon_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_direct_files_are_parsed_correctly(self):
+        # "MON/DIRECT[/<detector>]= <path>" -> MonId.direct. Without a detector
+        # qualifier the entry is expanded to BOTH HAB and LAB; backslash paths
+        # are expected back with forward slashes.
+        # NOTE(review): these keys are not raw strings, so Python turns "\f"
+        # in "\file.ext" into a form-feed escape (while "\p"/"\P" stay as
+        # literal backslashes); presumably MonParser still normalises the
+        # result to "/file.ext" -- TODO confirm, or make the strings raw.
+        valid_settings = {"MON/DIRECT= C:\path1\Path2\file.ext ": {MonId.direct: [monitor_file(
+                          file_path="C:/path1/Path2/file.ext", detector_type=DetectorType.HAB),
+                          monitor_file(file_path="C:/path1/Path2/file.ext", detector_type=DetectorType.LAB)]},
+                          "MON/ direct  = filE.Ext ": {MonId.direct: [monitor_file(file_path="filE.Ext",
+                                                       detector_type=DetectorType.HAB), monitor_file(
+                                                       file_path="filE.Ext", detector_type=DetectorType.LAB)
+                                                       ]},
+                          "MON/DIRECT= \path1\Path2\file.ext ": {MonId.direct: [monitor_file(
+                                                                 file_path="/path1/Path2/file.ext",
+                                                                 detector_type=DetectorType.HAB),
+                                                                 monitor_file(file_path="/path1/Path2/file.ext",
+                                                                              detector_type=DetectorType.LAB)]},
+                          "MON/DIRECT= /path1/Path2/file.ext ": {MonId.direct: [monitor_file(
+                                                                                file_path="/path1/Path2/file.ext",
+                                                                                detector_type=DetectorType.HAB),
+                                                                 monitor_file(file_path="/path1/Path2/file.ext",
+                                                                              detector_type=DetectorType.LAB)]},
+                          "MON/DIRECT/ rear= /path1/Path2/file.ext ": {MonId.direct: [monitor_file(
+                                                                       file_path="/path1/Path2/file.ext",
+                                                                       detector_type=DetectorType.LAB)]},
+                          "MON/DIRECT/ frONT= path1/Path2/file.ext ": {MonId.direct: [monitor_file(
+                                                                       file_path="path1/Path2/file.ext",
+                                                                       detector_type=DetectorType.HAB)]}
+                          }
+
+        invalid_settings = {"MON/DIRECT= /path1/ Path2/file.ext ": RuntimeError,
+                            "MON/DIRECT /path1/Path2/file.ext ": RuntimeError,
+                            "MON/DIRECT=/path1/Path2/file ": RuntimeError}
+
+        mon_parser = MonParser()
+        do_test(mon_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_flat_files_are_parsed_correctly(self):
+        # "MON/FLAT[/<detector>]= <path>" -> a single MonId.flat monitor_file
+        # (default detector LAB, unlike DIRECT which expands to both).
+        # NOTE(review): same non-raw-string "\f" caveat as the DIRECT test.
+        valid_settings = {"MON/FLat  = C:\path1\Path2\file.ext ": {MonId.flat: monitor_file(
+                                                                   file_path="C:/path1/Path2/file.ext",
+                                                                   detector_type=DetectorType.LAB)},
+                          "MON/ flAt  = filE.Ext ": {MonId.flat: monitor_file(file_path="filE.Ext",
+                                                     detector_type=DetectorType.LAB)},
+                          "MON/flAT= \path1\Path2\file.ext ": {MonId.flat: monitor_file(
+                                                               file_path="/path1/Path2/file.ext",
+                                                               detector_type=DetectorType.LAB)},
+                          "MON/FLat= /path1/Path2/file.ext ": {MonId.flat: monitor_file(
+                                                               file_path="/path1/Path2/file.ext",
+                                                               detector_type=DetectorType.LAB)},
+                          "MON/FLat/ rear= /path1/Path2/file.ext ": {MonId.flat: monitor_file(
+                                                                     file_path="/path1/Path2/file.ext",
+                                                                     detector_type=DetectorType.LAB)},
+                          "MON/FLat/ frONT= path1/Path2/file.ext ": {MonId.flat: monitor_file(
+                                                                     file_path="path1/Path2/file.ext",
+                                                                     detector_type=DetectorType.HAB)}}
+
+        # NOTE(review): these invalid entries exercise MON/DIRECT, not
+        # MON/FLAT -- they look copy-pasted from the DIRECT test above.
+        invalid_settings = {"MON/DIRECT= /path1/ Path2/file.ext ": RuntimeError,
+                            "MON/DIRECT /path1/Path2/file.ext ": RuntimeError,
+                            "MON/DIRECT=/path1/Path2/file ": RuntimeError}
+
+        mon_parser = MonParser()
+        do_test(mon_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_hab_files_are_parsed_correctly(self):
+        # "MON/HAB= <path>" -> MonId.hab as a bare path string (no
+        # monitor_file wrapper). Same non-raw-string "\f" caveat as above.
+        valid_settings = {"MON/HAB  = C:\path1\Path2\file.ext ": {MonId.hab: "C:/path1/Path2/file.ext"},
+                          "MON/ hAB  = filE.Ext ": {MonId.hab: "filE.Ext"},
+                          "MON/HAb= \path1\Path2\file.ext ": {MonId.hab: "/path1/Path2/file.ext"},
+                          "MON/hAB= /path1/Path2/file.ext ": {MonId.hab: "/path1/Path2/file.ext"}}
+        invalid_settings = {"MON/HAB= /path1/ Path2/file.ext ": RuntimeError,
+                            "MON/hAB /path1/Path2/file.ext ": RuntimeError,
+                            "MON/HAB=/path1/Path2/file ": RuntimeError}
+
+        mon_parser = MonParser()
+        do_test(mon_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_hab_files_are_parsed_correctly2(self):
+        # NOTE(review): misnamed -- this tests "MON[/TRANS]/SPECTRUM =
+        # <int> [/INTERPOLATE]" -> MonId.spectrum, not HAB files.
+        valid_settings = {"MON/Spectrum = 123 ": {MonId.spectrum: monitor_spectrum(spectrum=123, is_trans=False,
+                                                                                   interpolate=False)},
+                          "MON/trans/Spectrum = 123 ": {MonId.spectrum: monitor_spectrum(spectrum=123, is_trans=True,
+                                                                                         interpolate=False)},
+                          "MON/trans/Spectrum = 123 /  interpolate": {MonId.spectrum: monitor_spectrum(spectrum=123,
+                                                                      is_trans=True, interpolate=True)},
+                          "MON/Spectrum = 123 /  interpolate": {MonId.spectrum:  monitor_spectrum(spectrum=123,
+                                                                                                  is_trans=False,
+                                                                                                  interpolate=True)}}
+        invalid_settings = {}
+
+        mon_parser = MonParser()
+        do_test(mon_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class PrintParserTest(unittest.TestCase):
+    def test_that_gets_type(self):
+        self.assertTrue(PrintParser.get_type(), "PRINT")
+
+    def test_that_print_is_parsed_correctly(self):
+        valid_settings = {"PRINT OdlfP slsk 23lksdl2 34l": {PrintId.print_line: "OdlfP slsk 23lksdl2 34l"}}
+
+        invalid_settings = {}
+
+        print_parser = PrintParser()
+        do_test(print_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class BackParserTest(unittest.TestCase):
+    def test_that_gets_type(self):
+        self.assertTrue(BackParser.get_type(), "BACK")
+
+    def test_that_all_monitors_is_parsed_correctly(self):
+        valid_settings = {"BACK / MON /times  123 34": {BackId.all_monitors: range_entry(start=123, stop=34)}}
+
+        invalid_settings = {}
+
+        back_parser = BackParser()
+        do_test(back_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_single_monitors_is_parsed_correctly(self):
+        valid_settings = {"BACK / M3 /times  123 34": {BackId.single_monitors: back_single_monitor_entry(monitor=3,
+                                                                                                         start=123,
+                                                                                                         stop=34)},
+                          "BACK / M3 123 34": {BackId.single_monitors: back_single_monitor_entry(monitor=3,
+                                                                                                 start=123,
+                                                                                                 stop=34)}}
+
+        invalid_settings = {"BACK / M 123 34": RuntimeError,
+                            "BACK / M3 123": RuntimeError}
+
+        back_parser = BackParser()
+        do_test(back_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_off_is_parsed_correctly(self):
+        valid_settings = {"BACK / M3 /OFF": {BackId.monitor_off: 3}}
+
+        invalid_settings = {"BACK / M /OFF": RuntimeError}
+
+        back_parser = BackParser()
+        do_test(back_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+    def test_that_trans_mon_is_parsed_correctly(self):
+        valid_settings = {"BACK / TRANS  123 344": {BackId.trans:  range_entry(start=123, stop=344)},
+                          "BACK / tranS 123 34": {BackId.trans: range_entry(start=123, stop=34)}}
+
+        invalid_settings = {"BACK / Trans / 123 34": RuntimeError,
+                            "BACK / trans 123": RuntimeError}
+
+        back_parser = BackParser()
+        do_test(back_parser, valid_settings, invalid_settings, self.assertTrue, self.assertRaises)
+
+
+class SANS2DParserTest(unittest.TestCase):
+    def test_that_gets_type(self):
+        self.assertTrue(SANS2DParser.get_type(), "SANS2D")
+
+    def test_that_sans2d_is_parsed_correctly(self):
+        sans_2d_parser = SANS2DParser()
+        result = sans_2d_parser.parse_line("SANS2D ")
+        self.assertTrue(result is not None)
+        self.assertTrue(not result)
+
+
+class LOQParserTest(unittest.TestCase):
+    def test_that_gets_type(self):
+        self.assertTrue(LOQParser.get_type(), "LOQ")
+
+    def test_that_loq_is_parsed_correctly(self):
+        loq_parser = LOQParser()
+        result = loq_parser.parse_line("LOQ ")
+        self.assertTrue(result is not None)
+        self.assertTrue(not result)
+
+
+class UserFileParserTest(unittest.TestCase):
+    """Checks that UserFileParser dispatches each user-file line to the correct sub-parser."""
+
+    def test_that_correct_parser_is_selected_(self):
+        """One representative line per sub-parser must produce that parser's output."""
+        # Arrange
+        user_file_parser = UserFileParser()
+
+        # DetParser
+        result = user_file_parser.parse_line(" DET/CoRR/FRONT/ SidE -957")
+        assert_valid_result(result, {DetectorId.correction_translation: single_entry_with_detector(entry=-957,
+                                     detector_type=DetectorType.HAB)}, self.assertTrue)
+
+        # LimitParser
+        result = user_file_parser.parse_line("l/Q/WCUT 234.4")
+        assert_valid_result(result, {LimitsId.wavelength_cut: 234.4}, self.assertTrue)
+
+        # MaskParser
+        result = user_file_parser.parse_line("MASK S 12  ")
+        assert_valid_result(result, {MaskId.single_spectrum_mask: 12}, self.assertTrue)
+
+        # SampleParser
+        result = user_file_parser.parse_line("SAMPLE /Offset 234.5")
+        assert_valid_result(result, {SampleId.offset: 234.5}, self.assertTrue)
+
+        # TransParser
+        result = user_file_parser.parse_line("TRANS / radius  =23")
+        assert_valid_result(result, {TransId.radius: 23}, self.assertTrue)
+
+        # TubeCalibFileParser
+        result = user_file_parser.parse_line("TUBECALIbfile= calib_file.nxs")
+        assert_valid_result(result, {TubeCalibrationFileId.file: "calib_file.nxs"}, self.assertTrue)
+
+        # QResolutionParser
+        result = user_file_parser.parse_line("QRESOL/ON")
+        assert_valid_result(result, {QResolutionId.on: True}, self.assertTrue)
+
+        # FitParser
+        result = user_file_parser.parse_line("FIT/TRANS/Straight 123 3556")
+        assert_valid_result(result, {FitId.general: fit_general(start=123, stop=3556, fit_type=FitType.Linear,
+                                                                data_type=None, polynomial_order=0)},
+                            self.assertTrue)
+
+        # GravityParser
+        result = user_file_parser.parse_line("Gravity/LExtra =23.5")
+        assert_valid_result(result, {GravityId.extra_length: 23.5}, self.assertTrue)
+
+        # MaskFileParser
+        result = user_file_parser.parse_line("MaskFile= test.xml,   testKsdk2.xml,tesetlskd.xml")
+        assert_valid_result(result, {MaskId.file: ["test.xml", "testKsdk2.xml", "tesetlskd.xml"]},
+                            self.assertTrue)
+
+        # MonParser
+        result = user_file_parser.parse_line("MON/length= 23.5 34")
+        assert_valid_result(result, {MonId.length: monitor_length(length=23.5, spectrum=34, interpolate=False)},
+                            self.assertTrue)
+
+        # PrintParser
+        result = user_file_parser.parse_line("PRINT OdlfP slsk 23lksdl2 34l")
+        assert_valid_result(result, {PrintId.print_line: "OdlfP slsk 23lksdl2 34l"}, self.assertTrue)
+
+        # BackParser
+        result = user_file_parser.parse_line("BACK / M3 /OFF")
+        assert_valid_result(result, {BackId.monitor_off: 3}, self.assertTrue)
+
+        # SANS2DParser - instrument lines carry no settings, hence the empty result
+        result = user_file_parser.parse_line("SANS2D")
+        self.assertTrue(not result)
+
+        # LOQParser - instrument lines carry no settings, hence the empty result
+        result = user_file_parser.parse_line("LOQ")
+        self.assertTrue(not result)
+
+    def test_that_non_existent_parser_throws(self):
+        """A line no sub-parser understands must raise a ValueError."""
+        # Arrange
+        user_file_parser = UserFileParser()
+
+        # Act + Assert
+        self.assertRaises(ValueError, user_file_parser.parse_line, "DetT/DKDK/ 23 23")
+
+# Allow running this test module directly as a script.
+if __name__ == "__main__":
+    unittest.main()
diff --git a/scripts/test/SANS/user_file/user_file_reader_test.py b/scripts/test/SANS/user_file/user_file_reader_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f6f0f45b1744acd03290529b8dbcb51c8af6387
--- /dev/null
+++ b/scripts/test/SANS/user_file/user_file_reader_test.py
@@ -0,0 +1,138 @@
+import unittest
+import mantid
+import os
+from sans.common.enums import (ISISReductionMode, DetectorType, RangeStepType, FitType)
+from sans.user_file.user_file_reader import UserFileReader
+from sans.user_file.user_file_common import (DetectorId, BackId, range_entry, back_single_monitor_entry,
+                                             single_entry_with_detector, mask_angle_entry, LimitsId, rebin_string_values,
+                                             simple_range, complex_range, MaskId, mask_block, mask_block_cross,
+                                             mask_line, range_entry_with_detector, SampleId, SetId, set_scales_entry,
+                                             position_entry, TransId, TubeCalibrationFileId, QResolutionId, FitId,
+                                             fit_general, MonId, monitor_length, monitor_file, GravityId,
+                                             monitor_spectrum, PrintId)
+from user_file_test_helper import create_user_file, sample_user_file
+
+
+# -----------------------------------------------------------------
+# --- Tests -------------------------------------------------------
+# -----------------------------------------------------------------
+class UserFileReaderTest(unittest.TestCase):
+    """End-to-end check that UserFileReader parses the shared sample user file
+    into the complete expected dict of user-file settings."""
+
+    def test_that_can_read_user_file(self):
+        """Reads the helper-generated sample user file and compares every entry."""
+        # Arrange
+        user_file_path = create_user_file(sample_user_file)
+        reader = UserFileReader(user_file_path)
+
+        # Act
+        output = reader.read_user_file()
+
+        # Assert
+        # Expected settings correspond line-by-line to sample_user_file in
+        # user_file_test_helper.py; each value is a list because a key can
+        # appear several times in a user file.
+        expected_values = {LimitsId.wavelength: [simple_range(start=1.5, stop=12.5, step=0.125,
+                                                              step_type=RangeStepType.Lin)],
+                           LimitsId.q: [complex_range(.001, .001, .0126, .08, .2, step_type1=RangeStepType.Lin,
+                                        step_type2=RangeStepType.Log)],
+                           LimitsId.qxy: [simple_range(0, 0.05, 0.001, RangeStepType.Lin)],
+                           BackId.single_monitors: [back_single_monitor_entry(1, 35000, 65000),
+                                                    back_single_monitor_entry(2, 85000, 98000)],
+                           DetectorId.reduction_mode: [ISISReductionMode.LAB],
+                           GravityId.on_off: [True],
+                           FitId.general: [fit_general(start=1.5, stop=12.5, fit_type=FitType.Log,
+                                                       data_type=None, polynomial_order=0)],
+                           MaskId.vertical_single_strip_mask: [single_entry_with_detector(191, DetectorType.LAB),
+                                                               single_entry_with_detector(191, DetectorType.HAB),
+                                                               single_entry_with_detector(0, DetectorType.LAB),
+                                                               single_entry_with_detector(0, DetectorType.HAB)],
+                           MaskId.horizontal_single_strip_mask: [single_entry_with_detector(0, DetectorType.LAB),
+                                                                 single_entry_with_detector(0, DetectorType.HAB)],
+                           MaskId.horizontal_range_strip_mask: [range_entry_with_detector(190, 191, DetectorType.LAB),
+                                                                range_entry_with_detector(167, 172, DetectorType.LAB),
+                                                                range_entry_with_detector(190, 191, DetectorType.HAB),
+                                                                range_entry_with_detector(156, 159, DetectorType.HAB)
+                                                                ],
+                           MaskId.time: [range_entry_with_detector(17500, 22000, None)],
+                           MonId.direct: [monitor_file("DIRECTM1_15785_12m_31Oct12_v12.dat", DetectorType.LAB),
+                                          monitor_file("DIRECTM1_15785_12m_31Oct12_v12.dat", DetectorType.HAB)],
+                           MonId.spectrum: [monitor_spectrum(1, True, True), monitor_spectrum(1, False, True)],
+                           SetId.centre: [position_entry(155.45, -169.6, DetectorType.LAB)],
+                           SetId.scales: [set_scales_entry(0.074, 1.0, 1.0, 1.0, 1.0)],
+                           SampleId.offset: [53.0],
+                           DetectorId.correction_x: [single_entry_with_detector(-16.0, DetectorType.LAB),
+                                                     single_entry_with_detector(-44.0, DetectorType.HAB)],
+                           DetectorId.correction_y: [single_entry_with_detector(-20.0, DetectorType.HAB)],
+                           DetectorId.correction_z: [single_entry_with_detector(47.0, DetectorType.LAB),
+                                                     single_entry_with_detector(47.0, DetectorType.HAB)],
+                           DetectorId.correction_rotation: [single_entry_with_detector(0.0, DetectorType.HAB)],
+                           LimitsId.events_binning: ["7000.0,500.0,60000.0"],
+                           MaskId.clear_detector_mask: [True],
+                           MaskId.clear_time_mask: [True],
+                           LimitsId.radius: [range_entry(12, 15)],
+                           TransId.spec_shift: [-70.0],
+                           PrintId.print_line: ["for changer"],
+                           BackId.all_monitors: [range_entry(start=3500, stop=4500)],
+                           FitId.monitor_times: [range_entry(start=1000, stop=2000)],
+                           TransId.spec: [4],
+                           BackId.trans: [range_entry(start=123, stop=466)],
+                           TransId.radius: [7.0],
+                           TransId.roi: ["test.xml", "test2.xml"],
+                           TransId.mask: ["test3.xml", "test4.xml"],
+                           SampleId.path: [True],
+                           LimitsId.radius_cut: [200.0],
+                           LimitsId.wavelength_cut: [8.0],
+                           QResolutionId.on: [True],
+                           QResolutionId.delta_r: [11.],
+                           QResolutionId.collimation_length: [12.],
+                           QResolutionId.a1: [13.],
+                           QResolutionId.a2: [14.],
+                           QResolutionId.moderator: ["moderator_rkh_file.txt"],
+                           TubeCalibrationFileId.file: ["TUBE_SANS2D_BOTH_31681_25Sept15.nxs"]}
+
+        self.assertTrue(len(expected_values) == len(output))
+        for key, value in list(expected_values.items()):
+            self.assertTrue(key in output)
+            self.assertTrue(len(output[key]) == len(value))
+            elements = output[key]
+            # Make sure that the different entries are sorted
+            UserFileReaderTest._sort_list(elements)
+            UserFileReaderTest._sort_list(value)
+            self.assertTrue(elements == value)
+
+        # clean up
+        if os.path.exists(user_file_path):
+            os.remove(user_file_path)
+
+    @staticmethod
+    def _sort_list(elements):
+        """Sorts a settings list in place with a key suited to its element type,
+        so list comparisons above do not depend on parse order."""
+        if len(elements) == 1:
+            return
+
+        # Each entry type is a namedtuple-like record; pick a sortable field.
+        if isinstance(elements[0], single_entry_with_detector):
+            UserFileReaderTest._sort(elements, lambda x: x.entry)
+        elif isinstance(elements[0], simple_range):
+            UserFileReaderTest._sort(elements, lambda x: x.start)
+        elif isinstance(elements[0], complex_range):
+            UserFileReaderTest._sort(elements, lambda x: x.start)
+        elif isinstance(elements[0], back_single_monitor_entry):
+            UserFileReaderTest._sort(elements, lambda x: x.monitor)
+        elif isinstance(elements[0], fit_general):
+            UserFileReaderTest._sort(elements, lambda x: x.start)
+        elif isinstance(elements[0], range_entry_with_detector):
+            UserFileReaderTest._sort(elements, lambda x: x.start)
+        elif isinstance(elements[0], monitor_file):
+            UserFileReaderTest._sort(elements, lambda x: (x.file_path, DetectorType.to_string(x.detector_type)))
+        elif isinstance(elements[0], monitor_spectrum):
+            UserFileReaderTest._sort(elements, lambda x: x.spectrum)
+        elif isinstance(elements[0], position_entry):
+            UserFileReaderTest._sort(elements, lambda x: x.pos1)
+        elif isinstance(elements[0], set_scales_entry):
+            UserFileReaderTest._sort(elements, lambda x: x.s)
+        elif isinstance(elements[0], range_entry):
+            UserFileReaderTest._sort(elements, lambda x: x.start)
+        else:
+            # Plain values (floats, strings, ...) sort naturally.
+            elements.sort()
+
+    @staticmethod
+    def _sort(elements, predicate):
+        """In-place sort of elements using predicate as the sort key."""
+        elements.sort(key=predicate)
+
+
+# Allow running this test module directly as a script.
+if __name__ == "__main__":
+    unittest.main()
diff --git a/scripts/test/SANS/user_file/user_file_state_director_test.py b/scripts/test/SANS/user_file/user_file_state_director_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bfd6a80eed297ceae5dfcb5f1282c70cbbfcbb4
--- /dev/null
+++ b/scripts/test/SANS/user_file/user_file_state_director_test.py
@@ -0,0 +1,222 @@
+import os
+import unittest
+import mantid
+
+
+from sans.user_file.user_file_state_director import UserFileStateDirectorISIS
+from sans.common.enums import (SANSFacility, ISISReductionMode, RangeStepType, RebinType, DataType, FitType,
+                               DetectorType)
+from sans.common.configurations import Configurations
+from sans.state.data import get_data_builder
+
+from user_file_test_helper import create_user_file, sample_user_file
+
+
+# -----------------------------------------------------------------
+# --- Tests -------------------------------------------------------
+# -----------------------------------------------------------------
+class UserFileStateDirectorISISTest(unittest.TestCase):
+    def _assert_data(self, state):
+        # The only item which can be set by the director in the data state is the tube calibration file
+        data = state.data
+        self.assertTrue(data.calibration == "TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+
+    def _assert_move(self, state):
+        move = state.move
+        # Check the elements which were set on move
+        self.assertTrue(move.sample_offset == 53.0/1000.)
+
+        # Detector specific
+        lab = move.detectors[DetectorType.to_string(DetectorType.LAB)]
+        hab = move.detectors[DetectorType.to_string(DetectorType.HAB)]
+        self.assertTrue(lab.x_translation_correction == -16.0/1000.)
+        self.assertTrue(lab.z_translation_correction == 47.0/1000.)
+        self.assertTrue(hab.x_translation_correction == -44.0/1000.)
+        self.assertTrue(hab.y_translation_correction == -20.0/1000.)
+        self.assertTrue(hab.z_translation_correction == 47.0/1000.)
+        self.assertTrue(hab.rotation_correction == 0.0)
+
+        # SANS2D-specific
+        self.assertTrue(move.monitor_4_offset == -70.0/1000.)
+
+    def _assert_mask(self, state):
+        mask = state.mask
+        self.assertTrue(mask.radius_min == 12/1000.)
+        self.assertTrue(mask.radius_max == 15/1000.)
+        self.assertTrue(mask.clear is True)
+        self.assertTrue(mask.clear_time is True)
+        self.assertTrue(mask.detectors[DetectorType.to_string(DetectorType.LAB)].single_horizontal_strip_mask == [0])
+        self.assertTrue(mask.detectors[DetectorType.to_string(DetectorType.LAB)].single_vertical_strip_mask == [0, 191])
+        self.assertTrue(mask.detectors[DetectorType.to_string(DetectorType.HAB)].single_horizontal_strip_mask == [0])
+        self.assertTrue(mask.detectors[DetectorType.to_string(DetectorType.HAB)].single_vertical_strip_mask == [0, 191])
+        self.assertTrue(mask.detectors[DetectorType.to_string(DetectorType.LAB)].range_horizontal_strip_start
+                        == [190, 167])
+        self.assertTrue(mask.detectors[DetectorType.to_string(DetectorType.LAB)].range_horizontal_strip_stop
+                        == [191, 172])
+        self.assertTrue(mask.detectors[DetectorType.to_string(DetectorType.HAB)].range_horizontal_strip_start
+                        == [190, 156])
+        self.assertTrue(mask.detectors[DetectorType.to_string(DetectorType.HAB)].range_horizontal_strip_stop
+                        == [191, 159])
+
+    def _assert_reduction(self, state):
+        reduction = state.reduction
+        self.assertTrue(reduction.reduction_mode is ISISReductionMode.LAB)
+
+    def _assert_scale(self, state):
+        scale = state.scale
+        self.assertTrue(scale.scale == 0.074)
+
+    def _assert_wavelength(self, state):
+        wavelength = state.wavelength
+        self.assertTrue(wavelength.wavelength_low == 1.5)
+        self.assertTrue(wavelength.wavelength_high == 12.5)
+        self.assertTrue(wavelength.wavelength_step == 0.125)
+        self.assertTrue(wavelength.wavelength_step_type is RangeStepType.Lin)
+
+    def _assert_convert_to_q(self, state):
+        convert_to_q = state.convert_to_q
+        self.assertTrue(convert_to_q.wavelength_cutoff == 8.0)
+        self.assertTrue(convert_to_q.radius_cutoff == 0.2)
+        self.assertTrue(convert_to_q.q_min == .001)
+        self.assertTrue(convert_to_q.q_max == .2)
+        self.assertTrue(convert_to_q.q_step == .001)
+        self.assertTrue(convert_to_q.q_step_type is RangeStepType.Lin)
+        self.assertTrue(convert_to_q.q_step2 == .08)
+        self.assertTrue(convert_to_q.q_step_type2 is RangeStepType.Log)
+        self.assertTrue(convert_to_q.use_gravity)
+
+        self.assertTrue(convert_to_q.use_q_resolution)
+        self.assertTrue(convert_to_q.q_resolution_a1 == 13./1000.)
+        self.assertTrue(convert_to_q.q_resolution_a2 == 14./1000.)
+        self.assertTrue(convert_to_q.q_resolution_delta_r == 11./1000.)
+        self.assertTrue(convert_to_q.moderator_file == "moderator_rkh_file.txt")
+        self.assertTrue(convert_to_q.q_resolution_collimation_length == 12.)
+
+    def _assert_adjustment(self, state):
+        adjustment = state.adjustment
+
+        # Normalize to monitor settings
+        normalize_to_monitor = adjustment.normalize_to_monitor
+        self.assertTrue(normalize_to_monitor.prompt_peak_correction_min == 1000)
+        self.assertTrue(normalize_to_monitor.prompt_peak_correction_max == 2000)
+        self.assertTrue(normalize_to_monitor.rebin_type is RebinType.InterpolatingRebin)
+        self.assertTrue(normalize_to_monitor.wavelength_low == 1.5)
+        self.assertTrue(normalize_to_monitor.wavelength_high == 12.5)
+        self.assertTrue(normalize_to_monitor.wavelength_step == 0.125)
+        self.assertTrue(normalize_to_monitor.wavelength_step_type is RangeStepType.Lin)
+        self.assertTrue(normalize_to_monitor.background_TOF_general_start == 3500)
+        self.assertTrue(normalize_to_monitor.background_TOF_general_stop == 4500)
+        self.assertTrue(normalize_to_monitor.background_TOF_monitor_start["1"] == 35000)
+        self.assertTrue(normalize_to_monitor.background_TOF_monitor_stop["1"] == 65000)
+        self.assertTrue(normalize_to_monitor.background_TOF_monitor_start["2"] == 85000)
+        self.assertTrue(normalize_to_monitor.background_TOF_monitor_stop["2"] == 98000)
+        self.assertTrue(normalize_to_monitor.incident_monitor == 1)
+
+        # Calculate Transmission
+        calculate_transmission = adjustment.calculate_transmission
+        self.assertTrue(calculate_transmission.prompt_peak_correction_min == 1000)
+        self.assertTrue(calculate_transmission.prompt_peak_correction_max == 2000)
+        self.assertTrue(calculate_transmission.default_transmission_monitor == 3)
+        self.assertTrue(calculate_transmission.default_incident_monitor == 2)
+        self.assertTrue(calculate_transmission.incident_monitor == 1)
+        self.assertTrue(calculate_transmission.transmission_radius_on_detector == 0.007)  # This is in mm
+        self.assertTrue(calculate_transmission.transmission_roi_files == ["test.xml", "test2.xml"])
+        self.assertTrue(calculate_transmission.transmission_mask_files == ["test3.xml", "test4.xml"])
+        self.assertTrue(calculate_transmission.transmission_monitor == 4)
+        self.assertTrue(calculate_transmission.rebin_type is RebinType.InterpolatingRebin)
+        self.assertTrue(calculate_transmission.wavelength_low == 1.5)
+        self.assertTrue(calculate_transmission.wavelength_high == 12.5)
+        self.assertTrue(calculate_transmission.wavelength_step == 0.125)
+        self.assertTrue(calculate_transmission.wavelength_step_type is RangeStepType.Lin)
+        self.assertFalse(calculate_transmission.use_full_wavelength_range)
+        self.assertTrue(calculate_transmission.wavelength_full_range_low ==
+                        Configurations.SANS2D.wavelength_full_range_low)
+        self.assertTrue(calculate_transmission.wavelength_full_range_high ==
+                        Configurations.SANS2D.wavelength_full_range_high)
+        self.assertTrue(calculate_transmission.background_TOF_general_start == 3500)
+        self.assertTrue(calculate_transmission.background_TOF_general_stop == 4500)
+        self.assertTrue(calculate_transmission.background_TOF_monitor_start["1"] == 35000)
+        self.assertTrue(calculate_transmission.background_TOF_monitor_stop["1"] == 65000)
+        self.assertTrue(calculate_transmission.background_TOF_monitor_start["2"] == 85000)
+        self.assertTrue(calculate_transmission.background_TOF_monitor_stop["2"] == 98000)
+        self.assertTrue(calculate_transmission.background_TOF_roi_start == 123)
+        self.assertTrue(calculate_transmission.background_TOF_roi_stop == 466)
+        self.assertTrue(calculate_transmission.fit[DataType.to_string(DataType.Sample)].fit_type is FitType.Log)
+        self.assertTrue(calculate_transmission.fit[DataType.to_string(DataType.Sample)].wavelength_low == 1.5)
+        self.assertTrue(calculate_transmission.fit[DataType.to_string(DataType.Sample)].wavelength_high == 12.5)
+        self.assertTrue(calculate_transmission.fit[DataType.to_string(DataType.Sample)].polynomial_order == 0)
+        self.assertTrue(calculate_transmission.fit[DataType.to_string(DataType.Can)].fit_type is FitType.Log)
+        self.assertTrue(calculate_transmission.fit[DataType.to_string(DataType.Can)].wavelength_low == 1.5)
+        self.assertTrue(calculate_transmission.fit[DataType.to_string(DataType.Can)].wavelength_high == 12.5)
+        self.assertTrue(calculate_transmission.fit[DataType.to_string(DataType.Can)].polynomial_order == 0)
+
+        # Wavelength and Pixel Adjustment
+        wavelength_and_pixel_adjustment = adjustment.wavelength_and_pixel_adjustment
+        self.assertTrue(wavelength_and_pixel_adjustment.wavelength_low == 1.5)
+        self.assertTrue(wavelength_and_pixel_adjustment.wavelength_high == 12.5)
+        self.assertTrue(wavelength_and_pixel_adjustment.wavelength_step == 0.125)
+        self.assertTrue(wavelength_and_pixel_adjustment.wavelength_step_type is RangeStepType.Lin)
+        self.assertTrue(wavelength_and_pixel_adjustment.adjustment_files[
+                        DetectorType.to_string(DetectorType.LAB)].wavelength_adjustment_file ==
+                        "DIRECTM1_15785_12m_31Oct12_v12.dat")
+        self.assertTrue(wavelength_and_pixel_adjustment.adjustment_files[
+                        DetectorType.to_string(DetectorType.HAB)].wavelength_adjustment_file ==
+                        "DIRECTM1_15785_12m_31Oct12_v12.dat")
+
+        # Assert wide angle correction
+        self.assertTrue(state.adjustment.wide_angle_correction)
+
+    def test_state_can_be_created_from_valid_user_file_with_data_information(self):
+        # Arrange
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00022024")
+        data_builder.set_sample_scatter_period(3)
+        data_state = data_builder.build()
+
+        director = UserFileStateDirectorISIS(data_state)
+        user_file_path = create_user_file(sample_user_file)
+
+        director.set_user_file(user_file_path)
+        # TODO: Add manual settings
+        state = director.construct()
+
+        # Assert
+        self._assert_data(state)
+        self._assert_move(state)
+        self._assert_mask(state)
+        self._assert_reduction(state)
+        self._assert_wavelength(state)
+        self._assert_scale(state)
+        self._assert_adjustment(state)
+        self._assert_convert_to_q(state)
+
+        # clean up
+        if os.path.exists(user_file_path):
+            os.remove(user_file_path)
+
+    def test_stat_can_be_crated_from_valid_user_file_and_later_on_reset(self):
+        # Arrange
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00022024")
+        data_builder.set_sample_scatter_period(3)
+        data_state = data_builder.build()
+
+        director = UserFileStateDirectorISIS(data_state)
+        user_file_path = create_user_file(sample_user_file)
+        director.set_user_file(user_file_path)
+
+        # Act
+        director.set_mask_builder_radius_min(0.001298)
+        director.set_mask_builder_radius_max(0.003298)
+        state = director.construct()
+
+        # Assert
+        self.assertTrue(state.mask.radius_min == 0.001298)
+        self.assertTrue(state.mask.radius_max == 0.003298)
+
+        # clean up
+        if os.path.exists(user_file_path):
+            os.remove(user_file_path)
+
+# Allow running this test module directly as a script.
+if __name__ == "__main__":
+    unittest.main()
diff --git a/scripts/test/SANS/user_file/user_file_test_helper.py b/scripts/test/SANS/user_file/user_file_test_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..a94ce69da14947c4b3d38f656f45a43f49762885
--- /dev/null
+++ b/scripts/test/SANS/user_file/user_file_test_helper.py
@@ -0,0 +1,89 @@
+import os
+import mantid
+
+sample_user_file = ("PRINT for changer\n"  # representative ISIS SANS2D user file exercised by the parser tests
+                    "MASK/CLEAR \n"
+                    "MASK/CLEAR/TIME\n"
+                    "L/WAV 1.5 12.5 0.125/LIN\n"  # limit (L) commands; presumably wavelength/Q binning - confirm against the user-file spec
+                    "L/Q .001,.001, .0126, -.08, .2\n"
+                    "!L/Q .001 .8 .08/log\n"  # lines starting with '!' are user-file comments and must be ignored by the parser
+                    "L/QXY 0 0.05 .001/lin\n"
+                    "BACK/M1 35000 65000\n"
+                    "BACK/M2 85000 98000\n"
+                    "BACK/MON/TIMES 3500 4500\n"
+                    "BACK/TRANS 123 466\n"
+                    "DET/REAR\n"
+                    "GRAVITY/ON\n"
+                    "!FIT/TRANS/OFF\n"
+                    "FIT/TRANS/LOG 1.5 12.5\n"
+                    "FIT/MONITOR 1000 2000\n"
+                    "mask/rear h0\n"  # detector masking section; deliberately lower-case to exercise case-insensitive parsing
+                    "mask/rear h190>h191\n"
+                    "mask/rear h167>h172\n"
+                    "mask/rear v0\n"
+                    "mask/rear v191\n"
+                    "mask/front h0\n"
+                    "mask/front h190>h191\n"
+                    "mask/front v0\n"
+                    "mask/front v191\n"
+                    "! dead wire near top\n"
+                    "mask/front h156>h159\n"
+                    "!masking off beamstop arm - 12mm wide @ 19degrees\n"
+                    "!mask/rear/line 12 19\n"
+                    "! spot on rhs beam stop at 11m\n"
+                    "! mask h57>h66+v134>v141\n"
+                    "!\n"
+                    "! mask for Bragg at 12m, 26/03/11, 3 time channels\n"
+                    "mask/time 17500 22000\n"
+                    "!\n"
+                    "L/R 12 15\n"
+                    "L/Q/RCut 200\n"
+                    "L/Q/WCut 8.0\n"
+                    "!PRINT REMOVED RCut=200 WCut=8\n"
+                    "!\n"
+                    "MON/DIRECT=DIRECTM1_15785_12m_31Oct12_v12.dat\n"  # monitor / transmission configuration
+                    "MON/TRANS/SPECTRUM=1/INTERPOLATE\n"
+                    "MON/SPECTRUM=1/INTERPOLATE\n"
+                    "!TRANS/TRANSPEC=3\n"
+                    "TRANS/TRANSPEC=4/SHIFT=-70\n"
+                    "TRANS/RADIUS=7.0\n"
+                    "TRANS/ROI=test.xml, test2.xml\n"
+                    "TRANS/MASK=test3.xml, test4.xml\n"
+                    "!\n"
+                    "set centre 155.45 -169.6\n"
+                    "!\n"
+                    "! 25/10/13 centre gc 22021, fit gdw20 22023\n"
+                    "set scales 0.074 1.0 1.0 1.0 1.0\n"
+                    "! correction to actual sample position, notionally 81mm before shutter\n"
+                    "SAMPLE/OFFSET +53.0\n"
+                    "! Correction to SANS2D encoders in mm\n"
+                    "DET/CORR/REAR/X -16.0\n"  # detector position corrections
+                    "DET/CORR/REAR/Z 47.0\n"
+                    "DET/CORR/FRONT/X -44.0\n"
+                    "DET/CORR/FRONT/Y -20.0\n"
+                    "DET/CORR/FRONT/Z 47.0\n"
+                    "DET/CORR/FRONT/ROT 0.0\n"
+                    "!\n"
+                    "!! 01/10/13 MASKSANS2d_133F M3 by M1 trans Hellsing, Rennie, Jackson, L1=L2=12m A1=20 and A2=8mm\n"
+                    "L/EVENTSTIME 7000.0,500.0,60000.0\n"
+                    "SAMPLE/PATH/ON\n"
+                    "QRESOL/ON \n"  # Q-resolution settings; note trailing spaces are intentional parser input
+                    "QRESOL/DELTAR=11 \n"
+                    "QRESOL/LCOLLIM=12 \n"
+                    "QRESOL/MODERATOR=moderator_rkh_file.txt\n"
+                    "QRESOL/A1=13\n"
+                    "QRESOL/A2=14\n"
+                    "TUBECALIBFILE=TUBE_SANS2D_BOTH_31681_25Sept15.nxs"  # last entry has no trailing newline
+                    )
+
+
+def create_user_file(user_file_content):  # write the given text to a temp user file and return its path
+    user_file_path = os.path.join(mantid.config.getString('defaultsave.directory'), 'sample_sans_user_file.txt')  # fixed name inside Mantid's default save directory
+    if os.path.exists(user_file_path):
+        os.remove(user_file_path)  # drop any stale copy left over from a previous run
+
+    with open(user_file_path, 'w') as f:
+        f.write(user_file_content)
+
+    return user_file_path  # caller is responsible for deleting the file afterwards
+