diff --git a/.flake8 b/.flake8
index 8e4424874f40d99fdf4d6361f1c0928be59217b4..699c5146226534fbb165b134f12d2ac5b59fc3e2 100644
--- a/.flake8
+++ b/.flake8
@@ -1,6 +1,7 @@
 [flake8]
 ignore = E114,E115,E116,E121,E123,E126,E133,E2,E704,W503,F403,F405,F999
 exclude =
+    .git,
     buildconfig,
     docs,
     Framework/Algorithms/test,
diff --git a/Framework/API/CMakeLists.txt b/Framework/API/CMakeLists.txt
index 13eb185b3bccc18714a76e94e6eb7221db26cbbe..58083f8d9796b99005d9f85f0b56923a52eb0dd0 100644
--- a/Framework/API/CMakeLists.txt
+++ b/Framework/API/CMakeLists.txt
@@ -29,6 +29,7 @@ set ( SRC_FILES
 	src/DataProcessorAlgorithm.cpp
 	src/DeprecatedAlgorithm.cpp
 	src/DetectorInfo.cpp
+	src/DetectorSearcher.cpp
 	src/DomainCreatorFactory.cpp
 	src/EnabledWhenWorkspaceIsType.cpp
 	src/EqualBinSizesValidator.cpp
@@ -45,6 +46,7 @@ set ( SRC_FILES
 	src/FunctionDomainGeneral.cpp
 	src/FunctionDomainMD.cpp
 	src/FunctionFactory.cpp
+	src/FunctionGenerator.cpp
 	src/FunctionParameterDecorator.cpp
 	src/FunctionProperty.cpp
 	src/FunctionValues.cpp
@@ -103,8 +105,8 @@ set ( SRC_FILES
 	src/MultiPeriodGroupWorker.cpp
 	src/MultipleExperimentInfos.cpp
 	src/MultipleFileProperty.cpp
-	src/NearestNeighbourInfo.cpp
-	src/NearestNeighbours.cpp
+	src/WorkspaceNearestNeighbourInfo.cpp
+	src/WorkspaceNearestNeighbours.cpp
 	src/NotebookBuilder.cpp
 	src/NotebookWriter.cpp
 	src/NullCoordTransform.cpp
@@ -189,6 +191,7 @@ set ( INC_FILES
 	inc/MantidAPI/DeclareUserAlg.h
 	inc/MantidAPI/DeprecatedAlgorithm.h
 	inc/MantidAPI/DetectorInfo.h
+	inc/MantidAPI/DetectorSearcher.h
 	inc/MantidAPI/DllConfig.h
 	inc/MantidAPI/DomainCreatorFactory.h
 	inc/MantidAPI/EnabledWhenWorkspaceIsType.h
@@ -207,6 +210,7 @@ set ( INC_FILES
 	inc/MantidAPI/FunctionDomainGeneral.h
 	inc/MantidAPI/FunctionDomainMD.h
 	inc/MantidAPI/FunctionFactory.h
+	inc/MantidAPI/FunctionGenerator.h
 	inc/MantidAPI/FunctionParameterDecorator.h
 	inc/MantidAPI/FunctionProperty.h
 	inc/MantidAPI/FunctionValues.h
@@ -295,8 +299,8 @@ set ( INC_FILES
 	inc/MantidAPI/MultiPeriodGroupWorker.h
 	inc/MantidAPI/MultipleExperimentInfos.h
 	inc/MantidAPI/MultipleFileProperty.h
-	inc/MantidAPI/NearestNeighbourInfo.h
-	inc/MantidAPI/NearestNeighbours.h
+	inc/MantidAPI/WorkspaceNearestNeighbourInfo.h
+	inc/MantidAPI/WorkspaceNearestNeighbours.h
 	inc/MantidAPI/NotebookBuilder.h
 	inc/MantidAPI/NotebookWriter.h
 	inc/MantidAPI/NullCoordTransform.h
@@ -364,6 +368,7 @@ set ( TEST_FILES
 	CostFunctionFactoryTest.h
 	DataProcessorAlgorithmTest.h
 	DetectorInfoTest.h
+	DetectorSearcherTest.h
 	EnabledWhenWorkspaceIsTypeTest.h
 	EqualBinSizesValidatorTest.h
 	ExperimentInfoTest.h
@@ -414,8 +419,8 @@ set ( TEST_FILES
 	MultiPeriodGroupWorkerTest.h
 	MultipleExperimentInfosTest.h
 	MultipleFilePropertyTest.h
-	NearestNeighbourInfoTest.h
-	NearestNeighboursTest.h
+	WorkspaceNearestNeighbourInfoTest.h
+	WorkspaceNearestNeighboursTest.h
 	NotebookBuilderTest.h
 	NotebookWriterTest.h
 	NumericAxisTest.h
diff --git a/Framework/API/inc/MantidAPI/CompositeFunction.h b/Framework/API/inc/MantidAPI/CompositeFunction.h
index 93ae8af41907522ef83c4435bbf7b83b599a7fd1..3e84c101ab35ddf8a9e3b6d8e0a0faeece97502e 100644
--- a/Framework/API/inc/MantidAPI/CompositeFunction.h
+++ b/Framework/API/inc/MantidAPI/CompositeFunction.h
@@ -111,13 +111,6 @@ public:
   /// Set the fitting error for a parameter
   void setError(size_t i, double err) override;
 
-  /// Check if a parameter is active
-  bool isFixed(size_t i) const override;
-  /// Removes a parameter from the list of active
-  void fix(size_t i) override;
-  /// Restores a declared parameter i to the active status
-  void unfix(size_t i) override;
-
   /// Value of i-th active parameter. Override this method to make fitted
   /// parameters different from the declared
   double activeParameter(size_t i) const override;
@@ -130,8 +123,6 @@ public:
   std::string nameOfActive(size_t i) const override;
   /// Returns the name of active parameter i
   std::string descriptionOfActive(size_t i) const override;
-  /// Check if an active parameter i is actually active
-  bool isActive(size_t i) const override;
 
   /// Return parameter index from a parameter reference.
   size_t getParameterIndex(const ParameterReference &ref) const override;
@@ -149,11 +140,7 @@ public:
   bool removeTie(size_t i) override;
   /// Get the tie of i-th parameter
   ParameterTie *getTie(size_t i) const override;
-  /// Add a new tie
-  void addTie(std::unique_ptr<ParameterTie> tie) override;
 
-  /// Overwrite IFunction methods
-  void addConstraint(std::unique_ptr<IConstraint> ic) override;
   /// Get constraint of i-th parameter
   IConstraint *getConstraint(size_t i) const override;
   /// Prepare function for a fit
@@ -229,6 +216,10 @@ protected:
   /// Declare a new parameter
   void declareParameter(const std::string &name, double initValue = 0,
                         const std::string &description = "") override;
+  /// Change status of parameter
+  void setParameterStatus(size_t i, ParameterStatus status) override;
+  /// Get status of parameter
+  ParameterStatus getParameterStatus(size_t i) const override;
 
   size_t paramOffset(size_t i) const { return m_paramOffsets[i]; }
 
diff --git a/Framework/API/inc/MantidAPI/DetectorSearcher.h b/Framework/API/inc/MantidAPI/DetectorSearcher.h
new file mode 100644
index 0000000000000000000000000000000000000000..3024479af9b50dbcb01fed9a693bc68d3608d571
--- /dev/null
+++ b/Framework/API/inc/MantidAPI/DetectorSearcher.h
@@ -0,0 +1,107 @@
+#ifndef MANTID_API_DETECTORSEARCHER_H_
+#define MANTID_API_DETECTORSEARCHER_H_
+
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidAPI/DllConfig.h"
+#include "MantidGeometry/Instrument.h"
+#include "MantidGeometry/Objects/InstrumentRayTracer.h"
+#include "MantidKernel/NearestNeighbours.h"
+#include "MantidKernel/V3D.h"
+
+#include <Eigen/Core>
+
+/**
+  DetectorSearcher is a helper class to find a specific detector within
+  the instrument geometry.
+
+  This class solves the problem of finding a detector given a Qlab vector. Two
+  search strategies are used depending on the instrument's geometry.
+
+  1) For rectangular detector geometries, the InstrumentRayTracer class is used
+  to recursively search the instrument tree.
+
+  2) For geometries that do not use rectangular detectors, ray tracing to every
+  component is very expensive. In this case it is quicker to use a
+  NearestNeighbours search to find likely detector positions.
+
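+  A minimal usage sketch (illustrative only; ws is assumed to be a workspace
+  exposing getInstrument() and detectorInfo(), and qLab a Kernel::V3D in the
+  Q laboratory frame):
+
+    DetectorSearcher searcher(ws->getInstrument(), ws->detectorInfo());
+    bool found;
+    size_t detIndex;
+    std::tie(found, detIndex) = searcher.findDetectorIndex(qLab);
+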
+  @author Samuel Jackson
+  @date 2017
+
+  Copyright &copy; 2016 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+
+namespace Mantid {
+namespace API {
+
+class MANTID_API_DLL DetectorSearcher {
+public:
+  /// Search result type representing whether a detector was found and if so
+  /// which detector index it was.
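+  /// For example, {true, 42} means detector index 42 was found, while
+  /// {false, 0} means no detector was found (the index is then meaningless).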
+  typedef std::tuple<bool, size_t> DetectorSearchResult;
+
+  /// Create a new DetectorSearcher with the given instrument & detectors
+  DetectorSearcher(Geometry::Instrument_const_sptr instrument,
+                   const DetectorInfo &detInfo);
+  /// Find a detector that intersects with the given Qlab vector
+  DetectorSearchResult findDetectorIndex(const Kernel::V3D &q);
+
+private:
+  /// Attempt to find a detector using a full instrument ray tracing strategy
+  DetectorSearchResult searchUsingInstrumentRayTracing(const Kernel::V3D &q);
+  /// Attempt to find a detector using a nearest neighbours search strategy
+  DetectorSearchResult searchUsingNearestNeighbours(const Kernel::V3D &q);
+  /// Check whether the given direction in detector space intersects with a
+  /// detector
+  std::tuple<bool, size_t> checkInteceptWithNeighbours(
+      const Kernel::V3D &direction,
+      const Kernel::NearestNeighbours<3>::NearestNeighbourResults &neighbours)
+      const;
+  /// Helper function to build the nearest neighbour tree
+  void createDetectorCache();
+  /// Helper function to convert a Qlab vector to a direction in detector space
+  Kernel::V3D convertQtoDirection(const Kernel::V3D &q) const;
+  /// Helper function to handle the tube gap parameter in tube instruments
+  DetectorSearchResult handleTubeGap(
+      const Kernel::V3D &detectorDir,
+      const Kernel::NearestNeighbours<3>::NearestNeighbourResults &neighbours);
+
+  // Instance variables
+
+  /// flag for whether to use InstrumentRayTracer or NearestNeighbours
+  const bool m_usingFullRayTrace;
+  /// sign applied to Q: -1 for the crystallography convention, +1 otherwise
+  const double m_crystallography_convention;
+  /// detector info for the instrument
+  const DetectorInfo &m_detInfo;
+  /// handle to the instrument to search for detectors in
+  Geometry::Instrument_const_sptr m_instrument;
+  /// vector of detector indices used in the search
+  std::vector<size_t> m_indexMap;
+  /// Detector search cache for fast look-up of detectors
+  std::unique_ptr<Kernel::NearestNeighbours<3>> m_detectorCacheSearch;
+  /// instrument ray tracer object for searching in rectangular detectors
+  std::unique_ptr<Geometry::InstrumentRayTracer> m_rayTracer;
+};
+} // namespace API
+} // namespace Mantid
+
+#endif /* MANTID_API_DETECTORSEARCHER_H_ */
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/FunctionGenerator.h b/Framework/API/inc/MantidAPI/FunctionGenerator.h
similarity index 72%
rename from Framework/CurveFitting/inc/MantidCurveFitting/Functions/FunctionGenerator.h
rename to Framework/API/inc/MantidAPI/FunctionGenerator.h
index b3d391c2038feb72eb1e6d0a705138b853c22132..40c5fe59051834a8a5cb1f6ff0761c0d014a6951 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/FunctionGenerator.h
+++ b/Framework/API/inc/MantidAPI/FunctionGenerator.h
@@ -1,11 +1,10 @@
-#ifndef MANTID_CURVEFITTING_FUNCTIONGENERATOR_H_
-#define MANTID_CURVEFITTING_FUNCTIONGENERATOR_H_
+#ifndef MANTID_API_FUNCTIONGENERATOR_H_
+#define MANTID_API_FUNCTIONGENERATOR_H_
 
 #include "MantidAPI/IFunction.h"
 
 namespace Mantid {
-namespace CurveFitting {
-namespace Functions {
+namespace API {
 /**
 FunctionGenerator is a partial implementation of IFunction that defines a
 function consisting of two parts: the source and the target. The source
@@ -45,10 +44,10 @@ along with this program.  If not, see <http://www.gnu.org/licenses/>.
 File change history is stored at: <https://github.com/mantidproject/mantid>
 Code Documentation is available at: <http://doxygen.mantidproject.org>
 */
-class DLLExport FunctionGenerator : public API::IFunction {
+class DLLExport FunctionGenerator : public IFunction {
 public:
   /// Constructor
-  FunctionGenerator(API::IFunction_sptr source);
+  FunctionGenerator(IFunction_sptr source);
 
   /// @name Overrides implementing composition of two functions:
   /// m_source and m_target.
@@ -83,38 +82,14 @@ public:
   /// Set the fitting error for a parameter
   void setError(size_t i, double err) override;
 
-  /// Check if a declared parameter i is fixed
-  bool isFixed(size_t i) const override;
-  /// Removes a declared parameter i from the list of active
-  void fix(size_t i) override;
-  /// Restores a declared parameter i to the active status
-  void unfix(size_t i) override;
-
   /// Return parameter index from a parameter reference.
-  size_t getParameterIndex(const API::ParameterReference &ref) const override;
-  /// Tie a parameter to other parameters (or a constant)
-  void tie(const std::string &parName, const std::string &expr,
-           bool isDefault = false) override;
-  /// Apply the ties
-  void applyTies() override;
-  /// Remove all ties
-  void clearTies() override;
-  // Unhide base class function: removeTie(string).
-  using IFunction::removeTie;
-  /// Removes i-th parameter's tie
-  bool removeTie(size_t i) override;
-  /// Get the tie of i-th parameter
-  API::ParameterTie *getTie(size_t i) const override;
-
-  /// Add a constraint to function
-  void addConstraint(std::unique_ptr<API::IConstraint> ic) override;
-  /// Get constraint of i-th parameter
-  API::IConstraint *getConstraint(size_t i) const override;
-  /// Remove a constraint
-  void removeConstraint(const std::string &parName) override;
-
+  size_t getParameterIndex(const ParameterReference &ref) const override;
   /// Set up the function for a fit.
   void setUpForFit() override;
+  /// Get the tie for i-th parameter
+  ParameterTie *getTie(size_t i) const override;
+  /// Get the i-th constraint
+  IConstraint *getConstraint(size_t i) const override;
 
   /// Build target function.
   virtual void buildTargetFunction() const = 0;
@@ -123,9 +98,10 @@ protected:
   /// Declare a new parameter
   void declareParameter(const std::string &name, double initValue = 0,
                         const std::string &description = "") override;
-
-  /// Add a new tie. Derived classes must provide storage for ties
-  void addTie(std::unique_ptr<API::ParameterTie> tie) override;
+  /// Change status of parameter
+  void setParameterStatus(size_t i, ParameterStatus status) override;
+  /// Get status of parameter
+  ParameterStatus getParameterStatus(size_t i) const override;
   //@}
 
 public:
@@ -144,8 +120,8 @@ public:
   //@}
 
   /// Evaluate the function
-  void function(const API::FunctionDomain &domain,
-                API::FunctionValues &values) const override;
+  void function(const FunctionDomain &domain,
+                FunctionValues &values) const override;
 
 protected:
   /// overwrite IFunction base class method, which declare function parameters
@@ -158,17 +134,16 @@ protected:
   /// Update target function if necessary.
   void checkTargetFunction() const;
   /// Function that calculates parameters of the target function.
-  API::IFunction_sptr m_source;
+  IFunction_sptr m_source;
   /// Function that actually calculates the output.
-  mutable API::IFunction_sptr m_target;
+  mutable IFunction_sptr m_target;
   /// Cached number of parameters in m_source.
   size_t m_nOwnParams;
   /// Flag indicating that updateTargetFunction() is required.
   mutable bool m_dirty;
 };
 
-} // namespace Functions
-} // namespace CurveFitting
+} // namespace API
 } // namespace Mantid
 
-#endif /*MANTID_CURVEFITTING_FUNCTIONGENERATOR_H_*/
+#endif /*MANTID_API_FUNCTIONGENERATOR_H_*/
diff --git a/Framework/API/inc/MantidAPI/FunctionParameterDecorator.h b/Framework/API/inc/MantidAPI/FunctionParameterDecorator.h
index 2ad22fbcc8956be035e59af2402f91f92f6dbe83..e4d9c5b860c2bd7d4dec59cf5973a171bbae98da 100644
--- a/Framework/API/inc/MantidAPI/FunctionParameterDecorator.h
+++ b/Framework/API/inc/MantidAPI/FunctionParameterDecorator.h
@@ -91,15 +91,6 @@ public:
   /// Set the fitting error for a parameter of decorated function.
   void setError(size_t i, double err) override;
 
-  /// Check if a declared parameter i of decorated function is active.
-  bool isFixed(size_t i) const override;
-  /// Removes a declared parameter i of decorated function from the list of
-  /// active.
-  void fix(size_t i) override;
-  /// Restores a declared parameter i of decorated function to the active
-  /// status.
-  void unfix(size_t i) override;
-
   /// Return parameter index of decorated function from a parameter reference.
   /// Usefull for constraints and ties in composite functions.
   size_t getParameterIndex(const ParameterReference &ref) const override;
@@ -148,6 +139,8 @@ protected:
                         const std::string &description) override;
 
   void addTie(std::unique_ptr<ParameterTie>) override;
+  void setParameterStatus(size_t i, ParameterStatus status) override;
+  ParameterStatus getParameterStatus(size_t i) const override;
 
   virtual void beforeDecoratedFunctionSet(const IFunction_sptr &fn);
   void setDecoratedFunctionPrivate(const IFunction_sptr &fn);
diff --git a/Framework/API/inc/MantidAPI/IConstraint.h b/Framework/API/inc/MantidAPI/IConstraint.h
index e443a2e1db538d21ebea0217f77a7dfd8914a72f..154b2bcc63c5dc4f12518dbe0ffd9e85884532fb 100644
--- a/Framework/API/inc/MantidAPI/IConstraint.h
+++ b/Framework/API/inc/MantidAPI/IConstraint.h
@@ -4,8 +4,8 @@
 //----------------------------------------------------------------------
 // Includes
 //----------------------------------------------------------------------
-#include "MantidAPI/IFunction.h"
 #include "MantidAPI/ParameterReference.h"
+#include <string>
 
 namespace Mantid {
 namespace API {
diff --git a/Framework/API/inc/MantidAPI/IFunction.h b/Framework/API/inc/MantidAPI/IFunction.h
index 04e9342df3d8a219fb57886a1be9ff95fbf99fce..a61d19233ec64f70c74a8e81cc42d5c9ca7b5285 100644
--- a/Framework/API/inc/MantidAPI/IFunction.h
+++ b/Framework/API/inc/MantidAPI/IFunction.h
@@ -7,7 +7,9 @@
 #include "MantidAPI/DllConfig.h"
 #include "MantidAPI/FunctionDomain.h"
 #include "MantidAPI/FunctionValues.h"
+#include "MantidAPI/IConstraint.h"
 #include "MantidAPI/Jacobian.h"
+#include "MantidAPI/ParameterTie.h"
 #include "MantidKernel/Matrix.h"
 #include "MantidKernel/Unit.h"
 
@@ -33,9 +35,6 @@ class ProgressBase;
 namespace API {
 class Workspace;
 class MatrixWorkspace;
-class ParameterTie;
-class IConstraint;
-class ParameterReference;
 class FunctionHandler;
 
 /** This is an interface to a fitting function - a semi-abstarct class.
@@ -256,6 +255,8 @@ public:
     /// Create vector attribute
     explicit Attribute(const std::vector<double> &v)
         : m_data(v), m_quoteValue(false) {}
+    /// Copy assignment
+    Attribute &operator=(const Attribute &attr);
 
     /// Apply an attribute visitor
     template <typename T> T apply(AttributeVisitor<T> &v) {
@@ -312,9 +313,7 @@ public:
   //---------------------------------------------------------//
 
   /// Constructor
-  IFunction()
-      : m_isParallel(false), m_handler(nullptr), m_progReporter(nullptr),
-        m_chiSquared(0.0) {}
+  IFunction() : m_isParallel(false), m_handler(nullptr), m_chiSquared(0.0) {}
   /// Virtual destructor
   virtual ~IFunction();
   /// No copying
@@ -344,7 +343,7 @@ public:
   virtual int64_t estimateNoProgressCalls() const { return 1; }
 
   /// Attach a progress reporter
-  void setProgressReporter(Kernel::ProgressBase *reporter);
+  void setProgressReporter(boost::shared_ptr<Kernel::ProgressBase> reporter);
   /// Reports progress with an optional message
   void reportProgress(const std::string &msg = "") const;
   /// Returns true if a progress reporter is set & evalaution has been requested
@@ -412,20 +411,26 @@ public:
   /// Set the fitting error for a parameter
   virtual void setError(size_t i, double err) = 0;
 
-  /// Check if a declared parameter i is fixed
-  virtual bool isFixed(size_t i) const = 0;
-  /// Removes a declared parameter i from the list of active
-  virtual void fix(size_t i) = 0;
+  /// Check if a parameter i is fixed
+  bool isFixed(size_t i) const;
+  /// Check if a parameter i is fixed by default (not by user).
+  bool isFixedByDefault(size_t i) const;
+  /// Removes a parameter i from the list of active
+  void fix(size_t i, bool isDefault = false);
   /// Restores a declared parameter i to the active status
-  virtual void unfix(size_t i) = 0;
+  void unfix(size_t i);
   /// Fix a parameter
-  void fixParameter(const std::string &name);
+  void fixParameter(const std::string &name, bool isDefault = false);
   /// Free a parameter
   void unfixParameter(const std::string &name);
   /// Fix all parameters
-  void fixAll();
+  void fixAll(bool isDefault = false);
   /// Free all parameters
   void unfixAll();
+  /// Free all parameters fixed by default
+  void unfixAllDefault();
+  /// Fix all active parameters
+  void fixAllActive(bool isDefault = false);
 
   /// Return parameter index from a parameter reference. Usefull for constraints
   /// and ties in composite functions
@@ -447,7 +452,7 @@ public:
   /// Returns the name of active parameter i
   virtual std::string descriptionOfActive(size_t i) const;
   /// Check if an active parameter i is actually active
-  virtual bool isActive(size_t i) const { return !isFixed(i); }
+  bool isActive(size_t i) const;
   //@}
 
   /** @name Ties */
@@ -458,17 +463,17 @@ public:
   /// Add several ties
   virtual void addTies(const std::string &ties, bool isDefault = false);
   /// Apply the ties
-  virtual void applyTies() = 0;
+  virtual void applyTies();
   /// Removes the tie off a parameter
   virtual void removeTie(const std::string &parName);
   /// Remove all ties
-  virtual void clearTies() = 0;
+  virtual void clearTies();
   /// Removes i-th parameter's tie
-  virtual bool removeTie(size_t i) = 0;
+  virtual bool removeTie(size_t i);
   /// Get the tie of i-th parameter
-  virtual ParameterTie *getTie(size_t i) const = 0;
-  /// Add a new tie. Derived classes must provide storage for ties
-  virtual void addTie(std::unique_ptr<ParameterTie> tie) = 0;
+  virtual ParameterTie *getTie(size_t i) const;
+  /// Write all parameter ties to a string
+  std::string writeTies() const;
   //@}
 
   /** @name Constraints */
@@ -476,11 +481,15 @@ public:
   /// Add a list of conatraints from a string
   virtual void addConstraints(const std::string &str, bool isDefault = false);
   /// Add a constraint to function
-  virtual void addConstraint(std::unique_ptr<IConstraint> ic) = 0;
+  virtual void addConstraint(std::unique_ptr<IConstraint> ic);
   /// Get constraint of i-th parameter
-  virtual IConstraint *getConstraint(size_t i) const = 0;
+  virtual IConstraint *getConstraint(size_t i) const;
   /// Remove a constraint
-  virtual void removeConstraint(const std::string &parName) = 0;
+  virtual void removeConstraint(const std::string &parName);
+  /// Write all parameter constraints to a string
+  std::string writeConstraints() const;
+  /// Remove all constraints.
+  virtual void clearConstraints();
   //@}
 
   /** @name Attributes */
@@ -505,7 +514,7 @@ public:
   //@}
 
   /// Set up the function for a fit.
-  virtual void setUpForFit() = 0;
+  virtual void setUpForFit();
   /// Get number of values for a given domain.
   virtual size_t getValuesSize(const FunctionDomain &domain) const;
   /// Get number of domains required by this function
@@ -536,6 +545,18 @@ public:
   /// Return the handler
   FunctionHandler *getHandler() const { return m_handler; }
 
+  /// Describe parameter status in relation to fitting:
+  /// Active: Fit varies such parameter directly.
+  /// Fixed:  Value doesn't change during fit.
+  /// FixedByDefault:  Fixed by default; not shown in the ties of
+  ///         the output string.
+  /// Tied:   Value depends on values of other parameters.
+  enum ParameterStatus { Active, Fixed, FixedByDefault, Tied };
+  /// Change status of parameter
+  virtual void setParameterStatus(size_t i, ParameterStatus status) = 0;
+  /// Get status of parameter
+  virtual ParameterStatus getParameterStatus(size_t i) const = 0;
+
 protected:
   /// Function initialization. Declare function parameters in this method.
   virtual void init();
@@ -566,15 +587,13 @@ protected:
   /// A read-only ("mutable") attribute can be stored in a const method
   void storeReadOnlyAttribute(const std::string &name,
                               const API::IFunction::Attribute &value) const;
-
-  /// Write a parameter tie to a string
-  virtual std::string writeTie(size_t iParam) const;
-  /// Write a parameter constraint to a string
-  virtual std::string writeConstraint(size_t iParam) const;
+  /// Add a new tie. The function takes ownership of the tie.
+  virtual void addTie(std::unique_ptr<ParameterTie> tie);
 
   friend class ParameterTie;
   friend class CompositeFunction;
   friend class FunctionParameterDecorator;
+  friend class FunctionGenerator;
 
   /// Flag to hint that the function is being used in parallel computations
   bool m_isParallel;
@@ -583,7 +602,7 @@ protected:
   FunctionHandler *m_handler;
 
   /// Pointer to the progress handler
-  Kernel::ProgressBase *m_progReporter;
+  boost::shared_ptr<Kernel::ProgressBase> m_progReporter;
 
 private:
   /// The declared attributes
@@ -592,6 +611,10 @@ private:
   boost::shared_ptr<Kernel::Matrix<double>> m_covar;
   /// The chi-squared of the last fit
   double m_chiSquared;
+  /// Holds parameter ties as <parameter index,tie pointer>
+  std::vector<std::unique_ptr<ParameterTie>> m_ties;
+  /// Holds the constraints added to function
+  std::vector<std::unique_ptr<IConstraint>> m_constraints;
 };
 
 /// shared pointer to the function base class
diff --git a/Framework/API/inc/MantidAPI/IFunctionWithLocation.h b/Framework/API/inc/MantidAPI/IFunctionWithLocation.h
index 4582738c2b42d66cec85c0530d22dd3374fce128..624e0647a04c516c641dd3952f744106f9624a82 100644
--- a/Framework/API/inc/MantidAPI/IFunctionWithLocation.h
+++ b/Framework/API/inc/MantidAPI/IFunctionWithLocation.h
@@ -67,7 +67,10 @@ public:
 
   /// Fix a parameter or set up a tie such that value returned
   /// by centre() is constant during fitting.
-  virtual void fixCentre() {
+  /// @param isDefault :: If true fix centre by default:
+  ///    don't show it in ties
+  virtual void fixCentre(bool isDefault = false) {
+    UNUSED_ARG(isDefault);
     throw std::runtime_error(
         "Generic centre fixing isn't implemented for this function.");
   }
diff --git a/Framework/API/inc/MantidAPI/IPeakFunction.h b/Framework/API/inc/MantidAPI/IPeakFunction.h
index 3636919b41686ed772c00792659cea37e199d881..16984f0a6c180b63fe788bfec68c5ebd191cf4fb 100644
--- a/Framework/API/inc/MantidAPI/IPeakFunction.h
+++ b/Framework/API/inc/MantidAPI/IPeakFunction.h
@@ -79,7 +79,10 @@ public:
 
   /// Fix a parameter or set up a tie such that value returned
   /// by intensity() is constant during fitting.
-  virtual void fixIntensity() {
+  /// @param isDefault :: If true fix intensity by default:
+  ///    don't show it in ties
+  virtual void fixIntensity(bool isDefault = false) {
+    UNUSED_ARG(isDefault);
     throw std::runtime_error(
         "Generic intensity fixing isn't implemented for this function.");
   }
diff --git a/Framework/API/inc/MantidAPI/ParamFunction.h b/Framework/API/inc/MantidAPI/ParamFunction.h
index 21fe0a87fb8b3f753f55a79454b3a8a975640bb9..c6c3783ffcc8b75217c6c243fd870ce4a7133cc8 100644
--- a/Framework/API/inc/MantidAPI/ParamFunction.h
+++ b/Framework/API/inc/MantidAPI/ParamFunction.h
@@ -50,8 +50,6 @@ class MANTID_API_DLL ParamFunction : public virtual IFunction {
 public:
   /// Default constructor
   ParamFunction() {}
-  /// Virtual destructor
-  ~ParamFunction() override;
 
   /// Set i-th parameter
   void setParameter(size_t, const double &value,
@@ -83,13 +81,6 @@ public:
   /// Set the fitting error for a parameter
   void setError(size_t i, double err) override;
 
-  /// Check if a declared parameter i is active
-  bool isFixed(size_t i) const override;
-  /// Removes a declared parameter i from the list of active
-  void fix(size_t i) override;
-  /// Restores a declared parameter i to the active status
-  void unfix(size_t i) override;
-
   /// Return parameter index from a parameter reference. Usefull for constraints
   /// and ties in composite functions
   size_t getParameterIndex(const ParameterReference &ref) const override;
@@ -98,53 +89,37 @@ public:
   /// Get the containing function
   IFunction_sptr getContainingFunction(IFunction_sptr fun);
 
-  /// Apply the ties
-  void applyTies() override;
-  /// Remove all ties
-  void clearTies() override;
-  void removeTie(const std::string &parName) override {
-    IFunction::removeTie(parName);
-  }
-  /// Removes i-th parameter's tie
-  bool removeTie(size_t i) override;
-  /// Get the tie of i-th parameter
-  ParameterTie *getTie(size_t i) const override;
-  /// Add a new tie
-  void addTie(std::unique_ptr<ParameterTie> tie) override;
-
-  /// Add a constraint to function
-  void addConstraint(std::unique_ptr<IConstraint> ic) override;
-  /// Get constraint of i-th parameter
-  IConstraint *getConstraint(size_t i) const override;
-  /// Remove a constraint
-  void removeConstraint(const std::string &parName) override;
-  /// Set parameters to satisfy constraints
-  void setUpForFit() override;
-
 protected:
   /// Declare a new parameter
   void declareParameter(const std::string &name, double initValue = 0,
                         const std::string &description = "") override;
-
   /// Get the address of the parameter. For use in UserFunction with mu::Parser
   virtual double *getParameterAddress(size_t i);
-
   /// Nonvirtual member which removes all declared parameters
   void clearAllParameters();
+  /// Change status of parameter
+  void setParameterStatus(size_t i, ParameterStatus status) override;
+  /// Get status of parameter
+  ParameterStatus getParameterStatus(size_t i) const override;
 
 private:
-  /// The index map. m_indexMap[i] gives the total index for active parameter i
-  std::vector<bool> m_isFixed;
+  /// Check that a parameter index is in a valid range.
+  /// @param i :: Index to check.
+  inline void checkParameterIndex(size_t i) const {
+    if (i >= nParams()) {
+      throw std::out_of_range("ParamFunction parameter index " +
+                              std::to_string(i) + " out of range " +
+                              std::to_string(nParams()));
+    }
+  }
+  /// Keeps status for each parameter.
+  std::vector<ParameterStatus> m_parameterStatus;
   /// Keeps parameter names
   std::vector<std::string> m_parameterNames;
   /// Keeps parameter values
   std::vector<double> m_parameters;
   /// Keeps parameter errors
   std::vector<double> m_errors;
-  /// Holds parameter ties as <parameter index,tie pointer>
-  std::vector<std::unique_ptr<ParameterTie>> m_ties;
-  /// Holds the constraints added to function
-  std::vector<std::unique_ptr<IConstraint>> m_constraints;
   /// Flags of explicitly set parameters
   std::vector<bool> m_explicitlySet;
   /// parameter descriptions
diff --git a/Framework/API/inc/MantidAPI/ParameterReference.h b/Framework/API/inc/MantidAPI/ParameterReference.h
index 12b77ea8a31a329b4cc721687985c5ada679561c..d8a431574c2c96d2639565712a06c196158a218f 100644
--- a/Framework/API/inc/MantidAPI/ParameterReference.h
+++ b/Framework/API/inc/MantidAPI/ParameterReference.h
@@ -5,14 +5,11 @@
 // Includes
 //----------------------------------------------------------------------
 #include "MantidAPI/DllConfig.h"
-#include "MantidAPI/IFunction.h"
-
-namespace mu {
-class Parser;
-}
+#include <string>
 
 namespace Mantid {
 namespace API {
+class IFunction;
 /**
     A reference to a parameter in a function. To uniquely identify a parameter
     in a composite function
@@ -45,20 +42,30 @@ class MANTID_API_DLL ParameterReference {
 public:
   ParameterReference();
   ParameterReference(IFunction *fun, std::size_t index, bool isDefault = false);
-  std::size_t getIndex() const;
-  void reset(IFunction *fun, std::size_t index, bool isDefault = false);
-  void setParameter(const double &value);
+  void setParameter(const double &value, bool isExplicitlySet = true);
   double getParameter() const;
-  IFunction *getFunction() const;
   bool isDefault() const;
+  bool isParameterOf(const IFunction *fun) const;
   virtual ~ParameterReference() = default;
+  IFunction *getLocalFunction() const;
+  std::size_t getLocalIndex() const;
+  std::size_t parameterIndex() const;
+  std::string parameterName() const;
+
+protected:
+  void reset(IFunction *fun, std::size_t index, bool isDefault = false);
 
 private:
-  IFunction *m_function; ///< pointer to the function
-  std::size_t m_index;   ///< parameter index
+  /// Function-owner of this reference. parameterName() and parameterIndex()
+  /// return values relative to this function.
+  IFunction *m_owner;
+  /// Function that together with m_index uniquely identify the parameter.
+  IFunction *m_function;
+  /// Index of the parameter in m_function. It is assumed that this index
+  /// uniquely identifies the parameter within m_function.
+  std::size_t m_index;
   /// Flag to mark as default the value of an object associated with this
-  /// reference:
-  /// a tie or a constraint.
+  /// reference: a tie or a constraint.
   bool m_isDefault;
 };
 
diff --git a/Framework/API/inc/MantidAPI/ParameterTie.h b/Framework/API/inc/MantidAPI/ParameterTie.h
index 6525e08764aeeb99236c60a5db6287b34869e972..3c3586b0cbd8b21adafc61a0694bcb6fea4b33ab 100644
--- a/Framework/API/inc/MantidAPI/ParameterTie.h
+++ b/Framework/API/inc/MantidAPI/ParameterTie.h
@@ -5,8 +5,8 @@
 // Includes
 //----------------------------------------------------------------------
 #include "MantidAPI/DllConfig.h"
-#include "MantidAPI/IFunction.h"
 #include "MantidAPI/ParameterReference.h"
+#include <map>
 
 namespace mu {
 class Parser;
diff --git a/Framework/API/inc/MantidAPI/NearestNeighbourInfo.h b/Framework/API/inc/MantidAPI/WorkspaceNearestNeighbourInfo.h
similarity index 75%
rename from Framework/API/inc/MantidAPI/NearestNeighbourInfo.h
rename to Framework/API/inc/MantidAPI/WorkspaceNearestNeighbourInfo.h
index 894c8c43e130d3a1b806f36936245fb558966757..a4fe24169d07fd1187f39274ca88d33d1e5273be 100644
--- a/Framework/API/inc/MantidAPI/NearestNeighbourInfo.h
+++ b/Framework/API/inc/MantidAPI/WorkspaceNearestNeighbourInfo.h
@@ -15,10 +15,10 @@ class IDetector;
 namespace API {
 
 class MatrixWorkspace;
-class NearestNeighbours;
+class WorkspaceNearestNeighbours;
 
-/** NearestNeighbourInfo provides easy access to nearest-neighbour information
-  for a workspace.
+/** WorkspaceNearestNeighbourInfo provides easy access to nearest-neighbour
+  information for a workspace.
 
   Copyright &copy; 2016 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
   National Laboratory & European Spallation Source
@@ -41,12 +41,12 @@ class NearestNeighbours;
   File change history is stored at: <https://github.com/mantidproject/mantid>
   Code Documentation is available at: <http://doxygen.mantidproject.org>
 */
-class MANTID_API_DLL NearestNeighbourInfo {
+class MANTID_API_DLL WorkspaceNearestNeighbourInfo {
 public:
-  NearestNeighbourInfo(const MatrixWorkspace &workspace,
-                       const bool ignoreMaskedDetectors,
-                       const int nNeighbours = 8);
-  ~NearestNeighbourInfo();
+  WorkspaceNearestNeighbourInfo(const MatrixWorkspace &workspace,
+                                const bool ignoreMaskedDetectors,
+                                const int nNeighbours = 8);
+  ~WorkspaceNearestNeighbourInfo();
 
   std::map<specnum_t, Kernel::V3D>
   getNeighbours(const Geometry::IDetector *comp,
@@ -57,10 +57,10 @@ public:
 
 private:
   const MatrixWorkspace &m_workspace;
-  std::unique_ptr<NearestNeighbours> m_nearestNeighbours;
+  std::unique_ptr<WorkspaceNearestNeighbours> m_nearestNeighbours;
 };
 
 } // namespace API
 } // namespace Mantid
 
-#endif /* MANTID_API_NEARESTNEIGHBOURINFO_H_ */
+#endif /* MANTID_API_WORKSPACENEARESTNEIGHBOURINFO_H_ */
diff --git a/Framework/API/inc/MantidAPI/NearestNeighbours.h b/Framework/API/inc/MantidAPI/WorkspaceNearestNeighbours.h
similarity index 92%
rename from Framework/API/inc/MantidAPI/NearestNeighbours.h
rename to Framework/API/inc/MantidAPI/WorkspaceNearestNeighbours.h
index cbe5525b5624ae76218e498894b212283735bacf..218c656f74827930156b2eae93168776e8dfd46a 100644
--- a/Framework/API/inc/MantidAPI/NearestNeighbours.h
+++ b/Framework/API/inc/MantidAPI/WorkspaceNearestNeighbours.h
@@ -19,7 +19,8 @@ class IDetector;
 namespace API {
 class SpectrumInfo;
 /**
- * This class is not intended for direct use. Use NearestNeighbourInfo instead!
+ * This class is not intended for direct use. Use WorkspaceNearestNeighbourInfo
+ * instead!
  *
  * This class is used to find the nearest neighbours of a detector in the
  * instrument geometry. This class can be queried through calls to the
@@ -58,11 +59,11 @@ class SpectrumInfo;
  *  File change history is stored at: <https://github.com/mantidproject/mantid>
  *  Code Documentation is available at: <http://doxygen.mantidproject.org>
  */
-class MANTID_API_DLL NearestNeighbours {
+class MANTID_API_DLL WorkspaceNearestNeighbours {
 public:
-  NearestNeighbours(int nNeighbours, const SpectrumInfo &spectrumInfo,
-                    std::vector<specnum_t> spectrumNumbers,
-                    bool ignoreMaskedDetectors = false);
+  WorkspaceNearestNeighbours(int nNeighbours, const SpectrumInfo &spectrumInfo,
+                             std::vector<specnum_t> spectrumNumbers,
+                             bool ignoreMaskedDetectors = false);
 
   // Neighbouring spectra by radius
   std::map<specnum_t, Mantid::Kernel::V3D>
diff --git a/Framework/API/src/CompositeFunction.cpp b/Framework/API/src/CompositeFunction.cpp
index 0608adeec8aca3097b0a1c20da4e9db03046f940..0111d46458d5ecb801bda6f1aee4090aeb6043af 100644
--- a/Framework/API/src/CompositeFunction.cpp
+++ b/Framework/API/src/CompositeFunction.cpp
@@ -7,6 +7,7 @@
 #include "MantidAPI/FunctionFactory.h"
 #include "MantidKernel/Exception.h"
 #include "MantidKernel/Logger.h"
+#include "MantidKernel/Strings.h"
 
 #include <boost/lexical_cast.hpp>
 #include <boost/shared_array.hpp>
@@ -89,26 +90,21 @@ std::string CompositeFunction::asString() const {
       ostr << ';';
     }
   }
-  std::string ties;
-  for (size_t i = 0; i < nParams(); i++) {
-    const ParameterTie *tie = getTie(i);
-    if (tie) {
-      IFunction_sptr fun = getFunction(functionIndex(i));
-      std::string tmp = tie->asString(fun.get());
-      if (tmp.empty()) {
-        tmp = tie->asString(this);
-        if (!tmp.empty()) {
-          if (!ties.empty()) {
-            ties += ",";
-          }
-          ties += tmp;
-        }
-      }
-    }
+
+  // collect non-default constraints
+  std::string constraints = writeConstraints();
+  // print constraints
+  if (!constraints.empty()) {
+    ostr << ";constraints=(" << constraints << ")";
   }
+
+  // collect the non-default ties
+  std::string ties = writeTies();
+  // print the ties
   if (!ties.empty()) {
     ostr << ";ties=(" << ties << ")";
   }
+
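+  // The returned string lists the member functions separated by ';', followed
+  // by optional ";constraints=(...)" and ";ties=(...)" sections.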
   return ostr.str();
 }
 
@@ -329,40 +325,18 @@ std::string CompositeFunction::descriptionOfActive(size_t i) const {
   return ostr.str();
 }
 
-/**
- * query to see in the function is active
- * @param i :: The index of a declared parameter
- * @return true if parameter i is active
- */
-bool CompositeFunction::isActive(size_t i) const {
-  size_t iFun = functionIndex(i);
-  return m_functions[iFun]->isActive(i - m_paramOffsets[iFun]);
-}
-
-/**
- * query to see in the function is active
- * @param i :: The index of a declared parameter
- * @return true if parameter i is active
- */
-bool CompositeFunction::isFixed(size_t i) const {
+/// Change status of parameter
+void CompositeFunction::setParameterStatus(size_t i,
+                                           IFunction::ParameterStatus status) {
   size_t iFun = functionIndex(i);
-  return m_functions[iFun]->isFixed(i - m_paramOffsets[iFun]);
+  m_functions[iFun]->setParameterStatus(i - m_paramOffsets[iFun], status);
 }
 
-/**
- * @param i :: A declared parameter index to be removed from active
- */
-void CompositeFunction::fix(size_t i) {
-  size_t iFun = functionIndex(i);
-  m_functions[iFun]->fix(i - m_paramOffsets[iFun]);
-}
-
-/** Makes a parameter active again. It doesn't change the parameter's tie.
- * @param i :: A declared parameter index to be restored to active
- */
-void CompositeFunction::unfix(size_t i) {
+/// Get status of parameter
+IFunction::ParameterStatus
+CompositeFunction::getParameterStatus(size_t i) const {
   size_t iFun = functionIndex(i);
-  m_functions[iFun]->unfix(i - m_paramOffsets[iFun]);
+  return m_functions[iFun]->getParameterStatus(i - m_paramOffsets[iFun]);
 }
 
 /** Makes sure that the function is consistent.
@@ -423,7 +397,7 @@ void CompositeFunction::removeFunction(size_t i) {
   }
 
   IFunction_sptr fun = getFunction(i);
-
+  // Reduction in parameters
   size_t dnp = fun->nParams();
 
   for (size_t j = 0; j < nParams();) {
@@ -620,12 +594,14 @@ void CompositeFunction::applyTies() {
   for (size_t i = 0; i < nFunctions(); i++) {
     getFunction(i)->applyTies();
   }
+  IFunction::applyTies();
 }
 
 /**
  * Clear the ties.
  */
 void CompositeFunction::clearTies() {
+  IFunction::clearTies();
   for (size_t i = 0; i < nFunctions(); i++) {
     getFunction(i)->clearTies();
   }
@@ -636,9 +612,13 @@ void CompositeFunction::clearTies() {
  * @return True if successfull
  */
 bool CompositeFunction::removeTie(size_t i) {
-  size_t iFun = functionIndex(i);
-  bool res = m_functions[iFun]->removeTie(i - m_paramOffsets[iFun]);
-  return res;
+  bool foundAndRemovedTie = IFunction::removeTie(i);
+  if (!foundAndRemovedTie) {
+    size_t iFun = functionIndex(i);
+    bool res = m_functions[iFun]->removeTie(i - m_paramOffsets[iFun]);
+    return res;
+  }
+  return foundAndRemovedTie;
 }
 
 /** Get the tie of i-th parameter
@@ -646,18 +626,12 @@ bool CompositeFunction::removeTie(size_t i) {
  * @return A pointer to the tie.
  */
 ParameterTie *CompositeFunction::getTie(size_t i) const {
-  size_t iFun = functionIndex(i);
-  return m_functions[iFun]->getTie(i - m_paramOffsets[iFun]);
-}
-
-/**
- * Attaches a tie to this function. The attached tie is owned by the function.
- * @param tie :: A pointer to a new tie
- */
-void CompositeFunction::addTie(std::unique_ptr<ParameterTie> tie) {
-  size_t i = getParameterIndex(*tie);
-  size_t iFun = functionIndex(i);
-  m_functions[iFun]->addTie(std::move(tie));
+  auto tie = IFunction::getTie(i);
+  if (tie == nullptr) {
+    size_t iFun = functionIndex(i);
+    tie = m_functions[iFun]->getTie(i - m_paramOffsets[iFun]);
+  }
+  return tie;
 }
 
 /**
@@ -676,19 +650,11 @@ void CompositeFunction::declareParameter(const std::string &name,
       "CompositeFunction cannot not have its own parameters.");
 }
 
-/** Add a constraint
- *  @param ic :: Pointer to a constraint.
- */
-void CompositeFunction::addConstraint(std::unique_ptr<IConstraint> ic) {
-  size_t i = getParameterIndex(*ic);
-  size_t iFun = functionIndex(i);
-  getFunction(iFun)->addConstraint(std::move(ic));
-}
-
 /**
  * Prepare the function for a fit.
  */
 void CompositeFunction::setUpForFit() {
+  IFunction::setUpForFit();
   // set up the member functions
   for (size_t i = 0; i < nFunctions(); i++) {
     getFunction(i)->setUpForFit();
@@ -726,17 +692,27 @@ void CompositeFunction::setUpForFit() {
 /// @param i :: the index
 /// @return A pointer to the constraint
 IConstraint *CompositeFunction::getConstraint(size_t i) const {
-  size_t iFun = functionIndex(i);
-  return m_functions[iFun]->getConstraint(i - m_paramOffsets[iFun]);
+  auto constraint = IFunction::getConstraint(i);
+  if (constraint == nullptr) {
+    size_t iFun = functionIndex(i);
+    constraint = m_functions[iFun]->getConstraint(i - m_paramOffsets[iFun]);
+  }
+  return constraint;
 }
 
 /** Remove a constraint
  * @param parName :: The name of a parameter which constarint to remove.
  */
 void CompositeFunction::removeConstraint(const std::string &parName) {
-  size_t iPar = parameterIndex(parName);
-  size_t iFun = functionIndex(iPar);
-  getFunction(iFun)->removeConstraint(parameterLocalName(iPar));
+  auto i = parameterIndex(parName);
+  auto constraint = IFunction::getConstraint(i);
+  if (constraint != nullptr) {
+    IFunction::removeConstraint(parName);
+  } else {
+    size_t iPar = parameterIndex(parName);
+    size_t iFun = functionIndex(iPar);
+    getFunction(iFun)->removeConstraint(parameterLocalName(iPar));
+  }
 }
 
 /** Checks if a constraint has been explicitly set
@@ -756,8 +732,8 @@ bool CompositeFunction::isExplicitlySet(size_t i) const {
  */
 size_t
 CompositeFunction::getParameterIndex(const ParameterReference &ref) const {
-  if (ref.getFunction() == this && ref.getIndex() < nParams()) {
-    return ref.getIndex();
+  if (ref.getLocalFunction() == this && ref.getLocalIndex() < nParams()) {
+    return ref.getLocalIndex();
   }
   for (size_t iFun = 0; iFun < nFunctions(); iFun++) {
     IFunction_sptr fun = getFunction(iFun);
diff --git a/Framework/API/src/DetectorSearcher.cpp b/Framework/API/src/DetectorSearcher.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f95258958694bd84316a044a7a054943f0ad052a
--- /dev/null
+++ b/Framework/API/src/DetectorSearcher.cpp
@@ -0,0 +1,256 @@
+#include "MantidAPI/DetectorSearcher.h"
+#include "MantidGeometry/Instrument/ReferenceFrame.h"
+#include "MantidKernel/ConfigService.h"
+#include "MantidKernel/NearestNeighbours.h"
+
+#include <tuple>
+
+using Mantid::Kernel::V3D;
+using Mantid::Geometry::InstrumentRayTracer;
+using Mantid::Geometry::IDetector;
+using Mantid::Geometry::ReferenceFrame;
+using namespace Mantid;
+using namespace Mantid::API;
+
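+/** Returns the sign to apply to Q vectors: -1 if the "Q.convention"
+ * configuration property is set to "Crystallography", +1 otherwise.
+ */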
+double getQSign() {
+  const auto convention =
+      Kernel::ConfigService::Instance().getString("Q.convention");
+  return (convention == "Crystallography") ? -1.0 : 1.0;
+}
+
+/** Create a new DetectorSearcher for the given instrument
+ *
+ * The search strategy will be determined in the constructor based on the
+ * given instrument geometry
+ *
+ * @param instrument :: the instrument to find detectors in
+ * @param detInfo :: the API::DetectorInfo object for this instrument
+ */
+DetectorSearcher::DetectorSearcher(Geometry::Instrument_const_sptr instrument,
+                                   const API::DetectorInfo &detInfo)
+    : m_usingFullRayTrace(instrument->containsRectDetectors() ==
+                          Geometry::Instrument::ContainsState::Full),
+      m_crystallography_convention(getQSign()), m_detInfo(detInfo),
+      m_instrument(instrument) {
+
+  /* Choose the search strategy to use
+   * If the instrument uses rectangular detectors (e.g. TOPAZ) then it is faster
+   * to run a full ray trace starting from the top of the instrument. This is
+   * because looking up a single pixel in a rectangular detector is fast.
+   *
+   * If the instrument does not use rectangular detectors (e.g. WISH, CORELLI)
+   * then it is faster to use a nearest neighbour search to find the closest
+   * pixels, then check them for intersection.
+   */
+  if (!m_usingFullRayTrace) {
+    createDetectorCache();
+  } else {
+    m_rayTracer = Kernel::make_unique<InstrumentRayTracer>(instrument);
+  }
+}
+
+/** Create a NearestNeighbours search tree for the current instrument
+ */
+void DetectorSearcher::createDetectorCache() {
+  std::vector<Eigen::Vector3d> points;
+  points.reserve(m_detInfo.size());
+  m_indexMap.reserve(m_detInfo.size());
+
+  const auto frame = m_instrument->getReferenceFrame();
+  auto beam = frame->vecPointingAlongBeam();
+  auto up = frame->vecPointingUp();
+  beam.normalize();
+
+  for (size_t pointNo = 0; pointNo < m_detInfo.size(); ++pointNo) {
+    if (m_detInfo.isMonitor(pointNo) || m_detInfo.isMasked(pointNo))
+      continue; // detector is a monitor or masked so don't use
+
+    // Calculate a unit Q vector for each detector
+    // This follows a method similar to that used in IntegrateEllipsoids
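+    // Sketch of the reasoning: for elastic scattering |k_i| == |k_f|, so the
+    // direction of Q = k_i - k_f for a detector along unit vector pos is
+    // proportional to (beam - pos); the sign is flipped when the
+    // crystallography convention is in use.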
+    auto pos = m_detInfo.position(pointNo);
+    pos.normalize();
+    auto E1 = (pos - beam) * -m_crystallography_convention;
+    E1.normalize();
+
+    Eigen::Vector3d point(E1[0], E1[1], E1[2]);
+
+    // Ignore nonsensical points
+    if (point.hasNaN() || up.coLinear(beam, pos))
+      continue;
+
+    points.push_back(point);
+    m_indexMap.push_back(pointNo);
+  }
+
+  // create KDtree of cached detector Q vectors
+  m_detectorCacheSearch =
+      Kernel::make_unique<Kernel::NearestNeighbours<3>>(points);
+}
+
+/** Find the index of a detector given a vector in Qlab space
+ *
+ * If no detector is found the first parameter of the returned tuple is false
+ *
+ * @param q :: the Qlab vector to find a detector for
+ * @return tuple with data <detector found, detector index>
+ */
+DetectorSearcher::DetectorSearchResult
+DetectorSearcher::findDetectorIndex(const V3D &q) {
+  // quick check to see if this Q is valid
+  if (q.nullVector())
+    return std::make_tuple(false, 0);
+
+  // search using best strategy for current instrument
+  if (m_usingFullRayTrace) {
+    return searchUsingInstrumentRayTracing(q);
+  } else {
+    return searchUsingNearestNeighbours(q);
+  }
+}
+
+/** Find the index of a detector given a vector in Qlab space using a ray
+ * tracing search strategy
+ *
+ * If no detector is found the first parameter of the returned tuple is false
+ *
+ * @param q :: the Qlab vector to find a detector for
+ * @return tuple with data <detector found, detector index>
+ */
+DetectorSearcher::DetectorSearchResult
+DetectorSearcher::searchUsingInstrumentRayTracing(const V3D &q) {
+  const auto direction = convertQtoDirection(q);
+  m_rayTracer->traceFromSample(direction);
+  const auto det = m_rayTracer->getDetectorResult();
+
+  if (!det)
+    return std::make_tuple(false, 0);
+
+  const auto detIndex = m_detInfo.indexOf(det->getID());
+
+  if (m_detInfo.isMasked(detIndex) || m_detInfo.isMonitor(detIndex))
+    return std::make_tuple(false, 0);
+
+  return std::make_tuple(true, detIndex);
+}
+
+/** Find the index of a detector given a vector in Qlab space using a nearest
+ * neighbours search strategy
+ *
+ * If no detector is found the first parameter of the returned tuple is false
+ *
+ * @param q :: the Qlab vector to find a detector for
+ * @return tuple with data <detector found, detector index>
+ */
+DetectorSearcher::DetectorSearchResult
+DetectorSearcher::searchUsingNearestNeighbours(const V3D &q) {
+  const auto detectorDir = convertQtoDirection(q);
+  // find where this Q vector should intersect with "extended" space
+  const auto neighbours =
+      m_detectorCacheSearch->findNearest(Eigen::Vector3d(q[0], q[1], q[2]), 5);
+  if (neighbours.size() == 0)
+    return std::make_tuple(false, 0);
+
+  const auto result = checkInteceptWithNeighbours(detectorDir, neighbours);
+  const auto hitDetector = std::get<0>(result);
+  const auto index = std::get<1>(result);
+
+  if (hitDetector)
+    return std::make_tuple(true, m_indexMap[index]);
+
+  // Tube Gap Parameter specifically applies to tube instruments
+  if (!hitDetector && m_instrument->hasParameter("tube-gap")) {
+    return handleTubeGap(detectorDir, neighbours);
+  }
+
+  return std::make_tuple(false, 0);
+}
+
+/** Handle the tube-gap parameter in tube based instruments.
+ *
+ * This will check for intersections with the nearest neighbours by "wiggling"
+ * the predicted detector direction slightly.
+ *
+ * @param detectorDir :: the predicted direction towards a detector
+ * @param neighbours :: the NearestNeighbour results to check interception with
+ * @return a detector search result with whether a detector was hit
+ */
+DetectorSearcher::DetectorSearchResult DetectorSearcher::handleTubeGap(
+    const V3D &detectorDir,
+    const Kernel::NearestNeighbours<3>::NearestNeighbourResults &neighbours) {
+  std::vector<double> gaps = m_instrument->getNumberParameter("tube-gap", true);
+  if (!gaps.empty()) {
+    const auto gap = static_cast<double>(gaps.front());
+    // try adding and subtracting the tube-gap in each of the 3 Q dimensions to
+    // see if detectors can be found on either side of the tube gap
+    for (int i = 0; i < 3; i++) {
+      auto gapDir = V3D(0., 0., 0.);
+      gapDir[i] = gap;
+
+      auto beam1 = detectorDir + gapDir;
+      const auto result1 = checkInteceptWithNeighbours(beam1, neighbours);
+      const auto hit1 = std::get<0>(result1);
+
+      auto beam2 = detectorDir - gapDir;
+      const auto result2 = checkInteceptWithNeighbours(beam2, neighbours);
+      const auto hit2 = std::get<0>(result2);
+
+      if (hit1 && hit2) {
+        // Set the detector to one of the neighboring pixels
+        return std::make_tuple(true, m_indexMap[std::get<1>(result1)]);
+      }
+    }
+  }
+
+  return std::make_tuple(false, 0);
+}
+
+/** Check whether the given direction in real space intersects with any of the
+ * k nearest neighbours
+ *
+ * @param direction :: real space direction vector
+ * @param neighbours :: vector of nearest neighbours to check
+ * @return tuple of <detector hit, index into m_indexMap of the hit detector>
+ */
+std::tuple<bool, size_t> DetectorSearcher::checkInteceptWithNeighbours(
+    const V3D &direction,
+    const Kernel::NearestNeighbours<3>::NearestNeighbourResults &neighbours)
+    const {
+  Geometry::Track track(m_detInfo.samplePosition(), direction);
+  // Find which of the neighbours we actually intersect with
+  for (const auto &neighbour : neighbours) {
+    const auto index = std::get<1>(neighbour);
+    const auto &det = m_detInfo.detector(m_indexMap[index]);
+
+    Mantid::Geometry::BoundingBox bb;
+    if (!bb.doesLineIntersect(track))
+      continue;
+
+    const auto hitDetector = det.interceptSurface(track) > 0;
+    if (hitDetector)
+      return std::make_tuple(hitDetector, index);
+
+    track.reset(m_detInfo.samplePosition(), direction);
+  }
+
+  return std::make_tuple(false, 0);
+}
+
+/** Helper method to convert a vector in Qlab to a direction in detector space
+ *
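+ * Sketch of the kinematics used below: with the default convention
+ * Q = k_i - k_f and |k_i| = |k_f| = 1/wl, the elastic condition gives
+ * |Q|^2 = 2 * (Q dot k_i), hence 1/wl = |Q|^2 / (2 * (Q dot beam)). The
+ * scattered direction is then k_f = (1/wl) * beam - Q, with the sign of Q
+ * flipped when the crystallography convention is in use.
+ *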
+ * @param q :: a Qlab vector
+ * @return a direction in detector space
+ */
+V3D DetectorSearcher::convertQtoDirection(const V3D &q) const {
+  const auto norm_q = q.norm();
+  const auto refFrame = m_instrument->getReferenceFrame();
+  const V3D refBeamDir = refFrame->vecPointingAlongBeam();
+
+  const double qBeam = q.scalar_prod(refBeamDir) * m_crystallography_convention;
+  double one_over_wl = (norm_q * norm_q) / (2.0 * qBeam);
+
+  auto detectorDir = q * -m_crystallography_convention;
+  detectorDir[refFrame->pointingAlongBeam()] = one_over_wl - qBeam;
+  detectorDir.normalize();
+  return detectorDir;
+}
diff --git a/Framework/CurveFitting/src/Functions/FunctionGenerator.cpp b/Framework/API/src/FunctionGenerator.cpp
similarity index 69%
rename from Framework/CurveFitting/src/Functions/FunctionGenerator.cpp
rename to Framework/API/src/FunctionGenerator.cpp
index f8f4458efcac912f39009b8177e3991bec29fca7..3d3851fa74a82aebc9fc384af3a3c1984d641bb0 100644
--- a/Framework/CurveFitting/src/Functions/FunctionGenerator.cpp
+++ b/Framework/API/src/FunctionGenerator.cpp
@@ -1,19 +1,14 @@
-#include "MantidCurveFitting/Functions/FunctionGenerator.h"
+#include "MantidAPI/FunctionGenerator.h"
 #include "MantidAPI/IConstraint.h"
 #include "MantidAPI/ParameterTie.h"
 
 namespace Mantid {
-namespace CurveFitting {
-namespace Functions {
-
-using namespace CurveFitting;
+namespace API {
 
 using namespace Kernel;
 
-using namespace API;
-
 /// Constructor
-FunctionGenerator::FunctionGenerator(API::IFunction_sptr source)
+FunctionGenerator::FunctionGenerator(IFunction_sptr source)
     : m_source(source), m_nOwnParams(source->nParams()), m_dirty(true) {
   if (!m_source) {
     throw std::logic_error(
@@ -128,38 +123,33 @@ void FunctionGenerator::setError(size_t i, double err) {
   }
 }
 
-/// Check if a declared parameter i is fixed
-bool FunctionGenerator::isFixed(size_t i) const {
-  checkTargetFunction();
-  return i < m_nOwnParams ? m_source->isFixed(i)
-                          : m_target->isFixed(i - m_nOwnParams);
-}
-
-/// Removes a declared parameter i from the list of active
-void FunctionGenerator::fix(size_t i) {
+/// Change status of parameter
+void FunctionGenerator::setParameterStatus(size_t i,
+                                           IFunction::ParameterStatus status) {
   if (i < m_nOwnParams) {
-    m_source->fix(i);
+    m_source->setParameterStatus(i, status);
   } else {
     checkTargetFunction();
-    m_target->fix(i - m_nOwnParams);
+    m_target->setParameterStatus(i - m_nOwnParams, status);
   }
 }
 
-/// Restores a declared parameter i to the active status
-void FunctionGenerator::unfix(size_t i) {
+/// Get status of parameter
+IFunction::ParameterStatus
+FunctionGenerator::getParameterStatus(size_t i) const {
   if (i < m_nOwnParams) {
-    m_source->unfix(i);
+    return m_source->getParameterStatus(i);
   } else {
     checkTargetFunction();
-    m_target->unfix(i - m_nOwnParams);
+    return m_target->getParameterStatus(i - m_nOwnParams);
   }
 }
 
 /// Return parameter index from a parameter reference.
 size_t
 FunctionGenerator::getParameterIndex(const ParameterReference &ref) const {
-  if (ref.getFunction() == this) {
-    auto index = ref.getIndex();
+  if (ref.getLocalFunction() == this) {
+    auto index = ref.getLocalIndex();
     auto np = nParams();
     if (index < np) {
       return index;
@@ -170,90 +160,12 @@ FunctionGenerator::getParameterIndex(const ParameterReference &ref) const {
   return m_target->getParameterIndex(ref) + m_nOwnParams;
 }
 
-/// Tie a parameter to other parameters (or a constant)
-void FunctionGenerator::tie(const std::string &parName, const std::string &expr,
-                            bool isDefault) {
-  if (isSourceName(parName)) {
-    m_source->tie(parName, expr, isDefault);
-  } else {
-    checkTargetFunction();
-    m_target->tie(parName, expr, isDefault);
-  }
-}
-
-/// Apply the ties
-void FunctionGenerator::applyTies() {
-  m_source->applyTies();
+/// Set up the function for a fit.
+void FunctionGenerator::setUpForFit() {
   updateTargetFunction();
-  if (m_target) {
-    m_target->applyTies();
-  }
-}
-
-/// Remove all ties
-void FunctionGenerator::clearTies() {
-  m_source->clearTies();
-  if (m_target) {
-    m_target->clearTies();
-  }
-}
-
-/// Removes i-th parameter's tie
-bool FunctionGenerator::removeTie(size_t i) {
-  if (i < m_nOwnParams) {
-    return m_source->removeTie(i);
-  } else {
-    checkTargetFunction();
-    return m_target->removeTie(i - m_nOwnParams);
-  }
-}
-
-/// Get the tie of i-th parameter
-ParameterTie *FunctionGenerator::getTie(size_t i) const {
-  if (i < m_nOwnParams) {
-    return m_source->getTie(i);
-  } else {
-    checkTargetFunction();
-    return m_target->getTie(i - m_nOwnParams);
-  }
-}
-
-/// Add a constraint to function
-void FunctionGenerator::addConstraint(std::unique_ptr<API::IConstraint> ic) {
-  auto i = ic->getIndex();
-  if (i < m_nOwnParams) {
-    ic->reset(m_source.get(), i);
-    m_source->addConstraint(std::move(ic));
-  } else {
-    checkTargetFunction();
-    ic->reset(m_target.get(), i - m_nOwnParams);
-    m_target->addConstraint(std::move(ic));
-  }
+  IFunction::setUpForFit();
 }
 
-/// Get constraint of i-th parameter
-IConstraint *FunctionGenerator::getConstraint(size_t i) const {
-  if (i < m_nOwnParams) {
-    return m_source->getConstraint(i);
-  } else {
-    checkTargetFunction();
-    return m_target->getConstraint(i - m_nOwnParams);
-  }
-}
-
-/// Remove a constraint
-void FunctionGenerator::removeConstraint(const std::string &parName) {
-  if (isSourceName(parName)) {
-    m_source->removeConstraint(parName);
-  } else {
-    checkTargetFunction();
-    m_target->removeConstraint(parName);
-  }
-}
-
-/// Set up the function for a fit.
-void FunctionGenerator::setUpForFit() { updateTargetFunction(); }
-
 /// Declare a new parameter
 void FunctionGenerator::declareParameter(const std::string &, double,
                                          const std::string &) {
@@ -261,19 +173,6 @@ void FunctionGenerator::declareParameter(const std::string &, double,
       "FunctionGenerator cannot not have its own parameters.");
 }
 
-/// Add a new tie. Derived classes must provide storage for ties
-void FunctionGenerator::addTie(std::unique_ptr<API::ParameterTie> tie) {
-  size_t i = getParameterIndex(*tie);
-  if (i < m_nOwnParams) {
-    m_source->addTie(std::move(tie));
-  } else {
-    checkTargetFunction();
-    tie->reset(m_target.get(), tie->getIndex() - m_nOwnParams,
-               tie->isDefault());
-    m_target->addTie(std::move(tie));
-  }
-}
-
 /// Returns the number of attributes associated with the function
 size_t FunctionGenerator::nAttributes() const {
   checkTargetFunction();
@@ -293,7 +192,7 @@ std::vector<std::string> FunctionGenerator::getAttributeNames() const {
 }
 
 /// Return a value of attribute attName
-API::IFunction::Attribute
+IFunction::Attribute
 FunctionGenerator::getAttribute(const std::string &attName) const {
   if (IFunction::hasAttribute(attName)) {
     return IFunction::getAttribute(attName);
@@ -335,8 +234,8 @@ bool FunctionGenerator::hasAttribute(const std::string &attName) const {
 }
 
 // Evaluates the function
-void FunctionGenerator::function(const API::FunctionDomain &domain,
-                                 API::FunctionValues &values) const {
+void FunctionGenerator::function(const FunctionDomain &domain,
+                                 FunctionValues &values) const {
   updateTargetFunction();
   if (!m_target) {
     throw std::logic_error(
@@ -366,6 +265,34 @@ void FunctionGenerator::checkTargetFunction() const {
   }
 }
 
-} // namespace Functions
-} // namespace CurveFitting
+/// Get the tie for i-th parameter
+ParameterTie *FunctionGenerator::getTie(size_t i) const {
+  auto tie = IFunction::getTie(i);
+  if (!tie) {
+    return nullptr;
+  }
+  if (i < m_nOwnParams) {
+    tie = m_source->getTie(i);
+  } else {
+    checkTargetFunction();
+    tie = m_target->getTie(i - m_nOwnParams);
+  }
+  return tie;
+}
+
+/// Get the i-th constraint
+IConstraint *FunctionGenerator::getConstraint(size_t i) const {
+  auto constraint = IFunction::getConstraint(i);
+  if (constraint == nullptr) {
+    if (i < m_nOwnParams) {
+      constraint = m_source->getConstraint(i);
+    } else {
+      checkTargetFunction();
+      constraint = m_target->getConstraint(i - m_nOwnParams);
+    }
+  }
+  return constraint;
+}
+
+} // namespace API
 } // namespace Mantid
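
// A toy sketch of the index-forwarding pattern used throughout
// FunctionGenerator above: indices below m_nOwnParams address the source
// function, anything at or above it is forwarded to the generated target
// function with the offset removed. ToyFunction is an illustrative
// stand-in, not a Mantid type.
#include <cstddef>
#include <vector>

struct ToyFunction {
  std::vector<double> params;
  double get(std::size_t i) const { return params.at(i); }
};

// Mirrors e.g. FunctionGenerator::getParameterStatus: pick the owning
// function by comparing against the number of "own" (source) parameters.
double getGeneratedParameter(const ToyFunction &source,
                             const ToyFunction &target, std::size_t i) {
  const std::size_t nOwnParams = source.params.size();
  return i < nOwnParams ? source.get(i) : target.get(i - nOwnParams);
}
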
diff --git a/Framework/API/src/FunctionParameterDecorator.cpp b/Framework/API/src/FunctionParameterDecorator.cpp
index 4d37ae9545ca6951a86614a2588899f684077ecf..e3ccfc81e13c7137e9d76c88ba8fa8da6bdc1100 100644
--- a/Framework/API/src/FunctionParameterDecorator.cpp
+++ b/Framework/API/src/FunctionParameterDecorator.cpp
@@ -154,24 +154,6 @@ void FunctionParameterDecorator::setError(size_t i, double err) {
   return m_wrappedFunction->setError(i, err);
 }
 
-bool FunctionParameterDecorator::isFixed(size_t i) const {
-  throwIfNoFunctionSet();
-
-  return m_wrappedFunction->isFixed(i);
-}
-
-void FunctionParameterDecorator::fix(size_t i) {
-  throwIfNoFunctionSet();
-
-  m_wrappedFunction->fix(i);
-}
-
-void FunctionParameterDecorator::unfix(size_t i) {
-  throwIfNoFunctionSet();
-
-  m_wrappedFunction->unfix(i);
-}
-
 size_t FunctionParameterDecorator::getParameterIndex(
     const ParameterReference &ref) const {
   throwIfNoFunctionSet();
@@ -180,8 +162,8 @@ size_t FunctionParameterDecorator::getParameterIndex(
     return m_wrappedFunction->getParameterIndex(ref);
   }
 
-  if (ref.getFunction() == this && ref.getIndex() < nParams()) {
-    return ref.getIndex();
+  if (ref.getLocalFunction() == this && ref.getLocalIndex() < nParams()) {
+    return ref.getLocalIndex();
   }
 
   return nParams();
@@ -222,11 +204,16 @@ bool FunctionParameterDecorator::hasAttribute(
   return m_wrappedFunction->hasAttribute(attName);
 }
 
-void FunctionParameterDecorator::tie(const std::string &parName,
-                                     const std::string &expr, bool isDefault) {
+void FunctionParameterDecorator::setParameterStatus(
+    size_t i, IFunction::ParameterStatus status) {
   throwIfNoFunctionSet();
+  m_wrappedFunction->setParameterStatus(i, status);
+}
 
-  m_wrappedFunction->tie(parName, expr, isDefault);
+IFunction::ParameterStatus
+FunctionParameterDecorator::getParameterStatus(size_t i) const {
+  throwIfNoFunctionSet();
+  return m_wrappedFunction->getParameterStatus(i);
 }
 
 void FunctionParameterDecorator::applyTies() {
@@ -299,6 +286,12 @@ void FunctionParameterDecorator::declareParameter(
   UNUSED_ARG(description);
 }
 
+void FunctionParameterDecorator::tie(const std::string &parName,
+                                     const std::string &expr, bool isDefault) {
+  throwIfNoFunctionSet();
+  m_wrappedFunction->tie(parName, expr, isDefault);
+}
+
 /// Forwads addTie-call to the decorated function.
 void FunctionParameterDecorator::addTie(std::unique_ptr<ParameterTie> tie) {
   throwIfNoFunctionSet();
diff --git a/Framework/API/src/IFunction.cpp b/Framework/API/src/IFunction.cpp
index 36d32e4b4f84c7f8a70837fddadc1d2cfb7091dd..8885f0684a5d269dd32c312e656caafdeefe553e 100644
--- a/Framework/API/src/IFunction.cpp
+++ b/Framework/API/src/IFunction.cpp
@@ -66,7 +66,8 @@ boost::shared_ptr<IFunction> IFunction::clone() const {
  * @param reporter :: A pointer to a progress reporter that can be called during
  * function evaluation
  */
-void IFunction::setProgressReporter(Kernel::ProgressBase *reporter) {
+void IFunction::setProgressReporter(
+    boost::shared_ptr<Kernel::ProgressBase> reporter) {
   m_progReporter = reporter;
   m_progReporter->setNotifyStep(0.01);
 }
@@ -77,7 +78,7 @@ void IFunction::setProgressReporter(Kernel::ProgressBase *reporter) {
  */
 void IFunction::reportProgress(const std::string &msg) const {
   if (m_progReporter) {
-    const_cast<Kernel::ProgressBase *>(m_progReporter)->report(msg);
+    const_cast<Kernel::ProgressBase *>(m_progReporter.get())->report(msg);
   }
 }
 
@@ -103,6 +104,59 @@ void IFunction::functionDeriv(const FunctionDomain &domain,
   calNumericalDeriv(domain, jacobian);
 }
 
+/** Check if parameter i is active (neither fixed nor tied).
+ * @param i :: Index of a parameter.
+ */
+bool IFunction::isActive(size_t i) const {
+  return getParameterStatus(i) == Active;
+}
+
+/**
+ * Query if the parameter is fixed
+ * @param i :: The index of a declared parameter
+ * @return true if parameter i is fixed
+ */
+bool IFunction::isFixed(size_t i) const {
+  auto status = getParameterStatus(i);
+  return status == Fixed || status == FixedByDefault;
+}
+
+/// Check if a parameter i is fixed by default (not by user).
+/// @param i :: The index of a parameter
+/// @return true if parameter i is fixed by default
+bool IFunction::isFixedByDefault(size_t i) const {
+  return getParameterStatus(i) == FixedByDefault;
+}
+
+/// Fix a declared parameter. This method doesn't create a tie.
+/// @param i :: A declared parameter index to be fixed
+/// @param isDefault :: If true, fix it by default (not by the user)
+///
+void IFunction::fix(size_t i, bool isDefault) {
+  auto status = getParameterStatus(i);
+  if (status == Tied) {
+    throw std::runtime_error("Cannot fix parameter " + std::to_string(i) +
+                             " (" + parameterName(i) + "): it has a tie.");
+  }
+  if (isDefault) {
+    setParameterStatus(i, FixedByDefault);
+  } else {
+    setParameterStatus(i, Fixed);
+  }
+}
+
+/** Makes a fixed parameter active again. Throws if the parameter is tied.
+ * @param i :: A declared parameter index to be restored to active
+ */
+void IFunction::unfix(size_t i) {
+  auto status = getParameterStatus(i);
+  if (status == Tied) {
+    throw std::runtime_error("Cannot unfix parameter " + std::to_string(i) +
+                             " (" + parameterName(i) + "): it has a tie.");
+  }
+  setParameterStatus(i, Active);
+}
+
 /**
  * Ties a parameter to other parameters
  * @param parName :: The name of the parameter to tie.
@@ -114,13 +168,12 @@ void IFunction::functionDeriv(const FunctionDomain &domain,
 void IFunction::tie(const std::string &parName, const std::string &expr,
                     bool isDefault) {
   auto ti = Kernel::make_unique<ParameterTie>(this, parName, expr, isDefault);
-  this->fix(getParameterIndex(*ti));
   if (!isDefault && ti->isConstant()) {
     setParameter(parName, ti->eval());
+    fix(getParameterIndex(*ti));
   } else {
     addTie(std::move(ti));
   }
-  //  return ti.get();
 }
 
 /**
@@ -158,34 +211,187 @@ void IFunction::removeTie(const std::string &parName) {
   this->removeTie(i);
 }
 
-/// Write a parameter tie to a string
-/// @param iParam :: An index of a parameter.
+/// Write all parameter ties owned by this function to a string
 /// @return A tie string for the parameter.
-std::string IFunction::writeTie(size_t iParam) const {
+std::string IFunction::writeTies() const {
   std::ostringstream tieStream;
-  const ParameterTie *tie = getTie(iParam);
-  if (tie) {
-    if (!tie->isDefault()) {
-      tieStream << tie->asString(this);
+  bool first = true;
+  for (auto &tie : m_ties) {
+    if (tie->isDefault())
+      continue;
+    if (!first) {
+      tieStream << ',';
+    } else {
+      first = false;
     }
-  } else if (isFixed(iParam)) {
-    tieStream << parameterName(iParam) << "=" << getParameter(iParam);
+    tieStream << tie->asString(this);
   }
   return tieStream.str();
 }
 
-/// Write a parameter constraint to a string
-/// @param iParam :: An index of a parameter.
+/**
+ * Attaches a tie to this function. The attached tie is owned by the
+ * function.
+ * @param tie :: A pointer to a new tie
+ */
+void IFunction::addTie(std::unique_ptr<ParameterTie> tie) {
+
+  auto iPar = getParameterIndex(*tie);
+  bool found = false;
+  for (auto &m_tie : m_ties) {
+    auto mPar = getParameterIndex(*m_tie);
+    if (mPar == iPar) {
+      found = true;
+      m_tie = std::move(tie);
+      break;
+    }
+  }
+  if (!found) {
+    m_ties.push_back(std::move(tie));
+    setParameterStatus(iPar, Tied);
+  }
+}
+
+/**
+ * Apply the ties.
+ */
+void IFunction::applyTies() {
+  for (auto &m_tie : m_ties) {
+    m_tie->eval();
+  }
+}
+
+/**
+ * Functor used to find the tie or constraint associated with parameter i
+ */
+class ReferenceEqual {
+  /// The function that has the tie
+  const IFunction &m_fun;
+  /// index to find
+  const size_t m_i;
+
+public:
+  /// Constructor
+  explicit ReferenceEqual(const IFunction &fun, size_t i)
+      : m_fun(fun), m_i(i) {}
+  /// Function-call operator
+  /// @param p :: the element you are looking for
+  /// @return True if found
+  template <class T> bool operator()(const std::unique_ptr<T> &p) {
+    return m_fun.getParameterIndex(*p) == m_i;
+  }
+};
+
+/** Removes the tie from the i-th parameter, or does nothing if it is not tied.
+ * @param i :: The index of the tied parameter.
+ * @return True if a tie was removed.
+ */
+bool IFunction::removeTie(size_t i) {
+  if (i >= nParams()) {
+    throw std::out_of_range("Function parameter index out of range.");
+  }
+  auto it =
+      std::find_if(m_ties.begin(), m_ties.end(), ReferenceEqual(*this, i));
+  if (it != m_ties.end()) {
+    m_ties.erase(it);
+    setParameterStatus(i, Active);
+    return true;
+  }
+  unfix(i);
+  return false;
+}
+
+/** Get tie of parameter number i
+ * @param i :: The index of a declared parameter.
+ * @return A pointer to the tie
+ */
+ParameterTie *IFunction::getTie(size_t i) const {
+  auto it =
+      std::find_if(m_ties.cbegin(), m_ties.cend(), ReferenceEqual(*this, i));
+  if (it != m_ties.cend()) {
+    return it->get();
+  }
+  return nullptr;
+}
+
+/** Remove all ties
+ */
+void IFunction::clearTies() {
+  for (size_t i = 0; i < nParams(); ++i) {
+    setParameterStatus(i, Active);
+  }
+  m_ties.clear();
+}
+
+/** Add a constraint
+ *  @param ic :: Pointer to a constraint.
+ */
+void IFunction::addConstraint(std::unique_ptr<IConstraint> ic) {
+  size_t iPar = ic->parameterIndex();
+  bool found = false;
+  for (auto &constraint : m_constraints) {
+    if (constraint->parameterIndex() == iPar) {
+      found = true;
+      constraint = std::move(ic);
+      break;
+    }
+  }
+  if (!found) {
+    m_constraints.push_back(std::move(ic));
+  }
+}
+
+/** Get constraint of parameter number i
+ * @param i :: The index of a declared parameter.
+ * @return A pointer to the constraint, or nullptr if there is none
+ */
+IConstraint *IFunction::getConstraint(size_t i) const {
+  auto it = std::find_if(m_constraints.cbegin(), m_constraints.cend(),
+                         ReferenceEqual(*this, i));
+  if (it != m_constraints.cend()) {
+    return it->get();
+  }
+  return nullptr;
+}
+
+/** Remove a constraint
+ * @param parName :: The name of the parameter whose constraint is to be removed.
+ */
+void IFunction::removeConstraint(const std::string &parName) {
+  size_t iPar = parameterIndex(parName);
+  for (auto it = m_constraints.begin(); it != m_constraints.end(); ++it) {
+    if (iPar == (**it).getLocalIndex()) {
+      m_constraints.erase(it);
+      break;
+    }
+  }
+}
+
+/// Remove all constraints.
+void IFunction::clearConstraints() { m_constraints.clear(); }
+
+void IFunction::setUpForFit() {
+  for (auto &constraint : m_constraints) {
+    constraint->setParamToSatisfyConstraint();
+  }
+}
+
+/// Write all parameter constraints owned by this function to a string
 /// @return A constraint string for the parameter.
-std::string IFunction::writeConstraint(size_t iParam) const {
-  const IConstraint *c = getConstraint(iParam);
-  if (c && !c->isDefault()) {
-    std::string constraint = c->asString();
-    if (!constraint.empty()) {
-      return constraint;
+std::string IFunction::writeConstraints() const {
+  std::ostringstream stream;
+  bool first = true;
+  for (auto &constraint : m_constraints) {
+    if (constraint->isDefault())
+      continue;
+    if (!first) {
+      stream << ',';
+    } else {
+      first = false;
     }
+    stream << constraint->asString();
   }
-  return "";
+  return stream.str();
 }
 
 /**
@@ -204,33 +410,29 @@ std::string IFunction::asString() const {
       ostr << ',' << attName << '=' << attValue;
     }
   }
+  std::vector<std::string> ties;
   // print the parameters
   for (size_t i = 0; i < nParams(); i++) {
-    ostr << ',' << parameterName(i) << '=' << getParameter(i);
+    std::ostringstream paramOut;
+    paramOut << parameterName(i) << '=' << getParameter(i);
+    ostr << ',' << paramOut.str();
+    // Parameters fixed by the user (not by default) are also output as ties.
+    if (getParameterStatus(i) == Fixed) {
+      ties.push_back(paramOut.str());
+    }
   }
 
   // collect non-default constraints
-  std::vector<std::string> constraints;
-  for (size_t i = 0; i < nParams(); i++) {
-    auto constraint = writeConstraint(i);
-    if (!constraint.empty()) {
-      constraints.push_back(constraint);
-    }
-  }
+  std::string constraints = writeConstraints();
   // print constraints
   if (!constraints.empty()) {
-    ostr << ",constraints=("
-         << Kernel::Strings::join(constraints.begin(), constraints.end(), ",")
-         << ")";
+    ostr << ",constraints=(" << constraints << ")";
   }
 
   // collect the non-default ties
-  std::vector<std::string> ties;
-  for (size_t i = 0; i < nParams(); i++) {
-    auto tie = writeTie(i);
-    if (!tie.empty()) {
-      ties.push_back(tie);
-    }
+  auto tiesString = writeTies();
+  if (!tiesString.empty()) {
+    ties.push_back(tiesString);
   }
   // print the ties
   if (!ties.empty()) {
@@ -370,6 +572,13 @@ private:
 };
 }
 
+/// Copy assignment operator. Does not copy the m_quoteValue flag.
+/// @param attr :: The attribute to copy from.
+IFunction::Attribute &IFunction::Attribute::operator=(const Attribute &attr) {
+  m_data = attr.m_data;
+  return *this;
+}
+
 std::string IFunction::Attribute::value() const {
   AttValue tmp(m_quoteValue);
   return apply(tmp);
@@ -1149,9 +1358,10 @@ size_t IFunction::getValuesSize(const FunctionDomain &domain) const {
 
 /// Fix a parameter
 /// @param name :: A name of a parameter to fix
-void IFunction::fixParameter(const std::string &name) {
+/// @param isDefault :: If true fix it by default
+void IFunction::fixParameter(const std::string &name, bool isDefault) {
   auto i = parameterIndex(name);
-  fix(i);
+  fix(i, isDefault);
 }
 
 /// Free a parameter
@@ -1162,14 +1372,41 @@ void IFunction::unfixParameter(const std::string &name) {
 }
 
 /// Fix all parameters
-void IFunction::fixAll() {
+/// @param isDefault :: If true fix them by default
+void IFunction::fixAll(bool isDefault) {
   for (size_t i = 0; i < nParams(); ++i) {
-    fix(i);
+    fix(i, isDefault);
   }
 }
 
 /// Free all parameters
-void IFunction::unfixAll() { clearTies(); }
+void IFunction::unfixAll() {
+  for (size_t i = 0; i < nParams(); ++i) {
+    unfix(i);
+  }
+}
+
+/// Free all parameters fixed by default
+void IFunction::unfixAllDefault() {
+  for (size_t i = 0; i < nParams(); ++i) {
+    if (getParameterStatus(i) == FixedByDefault) {
+      unfix(i);
+    }
+  }
+}
+
+/// Fix all active parameters. This method doesn't change the
+/// status of an already fixed parameter, e.g. one fixed by default
+/// prior to calling this method remains fixed by default regardless
+/// of the value of the isDefault argument.
+/// @param isDefault :: If true, fix them by default.
+void IFunction::fixAllActive(bool isDefault) {
+  for (size_t i = 0; i < nParams(); ++i) {
+    if (getParameterStatus(i) == Active) {
+      fix(i, isDefault);
+    }
+  }
+}
 
 /// Get number of domains required by this function.
 /// If it returns a number greater than 1 then the domain
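
// A compact sketch of the parameter-status model the hunks above introduce:
// four states (Active, Fixed, FixedByDefault, Tied) plus the rules enforced
// by IFunction::fix/unfix, namely that a tied parameter can be neither fixed
// nor unfixed. ToyStatusTable is an illustration only, not the Mantid class.
#include <cstddef>
#include <stdexcept>
#include <vector>

enum class Status { Active, Fixed, FixedByDefault, Tied };

class ToyStatusTable {
public:
  explicit ToyStatusTable(std::size_t n) : m_status(n, Status::Active) {}

  void fix(std::size_t i, bool isDefault = false) {
    if (m_status.at(i) == Status::Tied)
      throw std::runtime_error("Cannot fix a tied parameter.");
    m_status[i] = isDefault ? Status::FixedByDefault : Status::Fixed;
  }

  void unfix(std::size_t i) {
    if (m_status.at(i) == Status::Tied)
      throw std::runtime_error("Cannot unfix a tied parameter.");
    m_status[i] = Status::Active;
  }

  void tie(std::size_t i) { m_status.at(i) = Status::Tied; }

  bool isActive(std::size_t i) const {
    return m_status.at(i) == Status::Active;
  }
  bool isFixed(std::size_t i) const {
    const auto s = m_status.at(i);
    return s == Status::Fixed || s == Status::FixedByDefault;
  }

private:
  std::vector<Status> m_status;
};
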
diff --git a/Framework/API/src/ParamFunction.cpp b/Framework/API/src/ParamFunction.cpp
index 3c7c628e78eb5b0adc88b326f61cfdeee31e5760..70bdd35973503e3f8a18f474b9e82dae232b33dc 100644
--- a/Framework/API/src/ParamFunction.cpp
+++ b/Framework/API/src/ParamFunction.cpp
@@ -15,12 +15,6 @@ namespace {
 Kernel::Logger g_log("ParamFunction");
 }
 
-/// Destructor
-ParamFunction::~ParamFunction() {
-  m_ties.clear();
-  m_constraints.clear();
-}
-
 /** Sets a new value to the i-th parameter.
  *  @param i :: The parameter index
  *  @param value :: The new value
@@ -43,9 +37,7 @@ void ParamFunction::setParameter(size_t i, const double &value,
     g_log.warning(errmsg.str());
   }
 
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   if (explicitlySet && value != m_parameters[i]) {
     m_explicitlySet[i] = true;
   }
@@ -58,9 +50,7 @@ void ParamFunction::setParameter(size_t i, const double &value,
  */
 void ParamFunction::setParameterDescription(size_t i,
                                             const std::string &description) {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   m_parameterDescriptions[i] = description;
 }
 
@@ -69,9 +59,7 @@ void ParamFunction::setParameterDescription(size_t i,
  *  @return the value of the requested parameter
  */
 double ParamFunction::getParameter(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return m_parameters[i];
 }
 
@@ -170,9 +158,7 @@ size_t ParamFunction::parameterIndex(const std::string &name) const {
  * @return the name of the parameter at the requested index
  */
 std::string ParamFunction::parameterName(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return m_parameterNames[i];
 }
 
@@ -181,9 +167,7 @@ std::string ParamFunction::parameterName(size_t i) const {
  * @return the description of the parameter at the requested index
  */
 std::string ParamFunction::parameterDescription(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return m_parameterDescriptions[i];
 }
 
@@ -193,9 +177,7 @@ std::string ParamFunction::parameterDescription(size_t i) const {
  * @return :: the error
  */
 double ParamFunction::getError(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return m_errors[i];
 }
 
@@ -205,9 +187,7 @@ double ParamFunction::getError(size_t i) const {
  * @param err :: The error value to set
  */
 void ParamFunction::setError(size_t i, double err) {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   m_errors[i] = err;
 }
 
@@ -226,7 +206,7 @@ void ParamFunction::declareParameter(const std::string &name, double initValue,
     throw std::invalid_argument(msg.str());
   }
 
-  m_isFixed.push_back(false);
+  m_parameterStatus.push_back(Active);
   m_parameterNames.push_back(name);
   m_parameterDescriptions.push_back(description);
   m_parameters.push_back(initValue);
@@ -234,202 +214,43 @@ void ParamFunction::declareParameter(const std::string &name, double initValue,
   m_explicitlySet.push_back(false);
 }
 
-/**
- * query if the parameter is fixed
- * @param i :: The index of a declared parameter
- * @return true if parameter i is active
- */
-bool ParamFunction::isFixed(size_t i) const {
-  if (i >= nParams())
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  return m_isFixed[i];
-}
-
-/** This method doesn't create a tie
- * @param i :: A declared parameter index to be fixed
- */
-void ParamFunction::fix(size_t i) {
-  if (isFixed(i))
-    return;
-  m_isFixed[i] = true;
-}
-
-/** Makes a parameter active again. It doesn't change the parameter's tie.
- * @param i :: A declared parameter index to be restored to active
- */
-void ParamFunction::unfix(size_t i) {
-  if (!isFixed(i))
-    return;
-  m_isFixed[i] = false;
-}
-
-/**
- * Attaches a tie to this ParamFunction. The attached tie is owned by the
- * ParamFunction.
- * @param tie :: A pointer to a new tie
- */
-void ParamFunction::addTie(std::unique_ptr<ParameterTie> tie) {
-  size_t iPar = tie->getIndex();
-  bool found = false;
-  for (auto &m_tie : m_ties) {
-    if (m_tie->getIndex() == iPar) {
-      found = true;
-      m_tie = std::move(tie);
-      break;
-    }
-  }
-  if (!found) {
-    m_ties.push_back(std::move(tie));
-  }
-}
-
-/**
- * Apply the ties.
- */
-void ParamFunction::applyTies() {
-  for (auto &m_tie : m_ties) {
-    m_tie->eval();
-  }
-}
-
-/**
- * Used to find ParameterTie for a parameter i
- */
-class ReferenceEqual {
-  /// index to find
-  const size_t m_i;
-
-public:
-  /// Constructor
-  explicit ReferenceEqual(size_t i) : m_i(i) {}
-  /// Bracket operator
-  /// @param p :: the element you are looking for
-  /// @return True if found
-  template <class T> bool operator()(const std::unique_ptr<T> &p) {
-    return p->getIndex() == m_i;
-  }
-};
-
-/** Removes i-th parameter's tie if it is tied or does nothing.
- * @param i :: The index of the tied parameter.
- * @return True if successfull
- */
-bool ParamFunction::removeTie(size_t i) {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
-  auto it = std::find_if(m_ties.begin(), m_ties.end(), ReferenceEqual(i));
-  if (it != m_ties.end()) {
-    m_ties.erase(it);
-    unfix(i);
-    return true;
-  }
-  unfix(i);
-  return false;
-}
-
-/** Get tie of parameter number i
- * @param i :: The index of a declared parameter.
- * @return A pointer to the tie
- */
-ParameterTie *ParamFunction::getTie(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
-  auto it = std::find_if(m_ties.cbegin(), m_ties.cend(), ReferenceEqual(i));
-  if (it != m_ties.cend()) {
-    return it->get();
-  }
-  return nullptr;
-}
-
-/** Remove all ties
- */
-void ParamFunction::clearTies() {
-  for (size_t i = 0; i < nParams(); ++i) {
-    unfix(i);
-  }
-  m_ties.clear();
-}
-
-/** Add a constraint
- *  @param ic :: Pointer to a constraint.
- */
-void ParamFunction::addConstraint(std::unique_ptr<IConstraint> ic) {
-  size_t iPar = ic->getIndex();
-  bool found = false;
-  for (auto &constraint : m_constraints) {
-    if (constraint->getIndex() == iPar) {
-      found = true;
-      constraint = std::move(ic);
-      break;
-    }
-  }
-  if (!found) {
-    m_constraints.push_back(std::move(ic));
-  }
-}
-
-/** Get constraint of parameter number i
- * @param i :: The index of a declared parameter.
- * @return A pointer to the constraint or NULL
- */
-IConstraint *ParamFunction::getConstraint(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
-  auto it = std::find_if(m_constraints.cbegin(), m_constraints.cend(),
-                         ReferenceEqual(i));
-  if (it != m_constraints.cend()) {
-    return it->get();
-  }
-  return nullptr;
-}
-
-/** Remove a constraint
- * @param parName :: The name of a parameter which constarint to remove.
- */
-void ParamFunction::removeConstraint(const std::string &parName) {
-  size_t iPar = parameterIndex(parName);
-  for (auto it = m_constraints.begin(); it != m_constraints.end(); ++it) {
-    if (iPar == (**it).getIndex()) {
-      m_constraints.erase(it);
-      break;
-    }
-  }
-}
-
-void ParamFunction::setUpForFit() {
-  for (auto &constraint : m_constraints) {
-    constraint->setParamToSatisfyConstraint();
-  }
-}
-
 /// Nonvirtual member which removes all declared parameters
 void ParamFunction::clearAllParameters() {
-  m_ties.clear();
-  m_constraints.clear();
+  clearTies();
+  clearConstraints();
   m_parameters.clear();
   m_parameterNames.clear();
   m_parameterDescriptions.clear();
-  m_isFixed.clear();
+  m_parameterStatus.clear();
+}
+
+/// Change status of parameter
+/// @param i :: Index of a parameter.
+/// @param status :: New parameter status.
+void ParamFunction::setParameterStatus(size_t i, ParameterStatus status) {
+  checkParameterIndex(i);
+  m_parameterStatus[i] = status;
+}
+
+/// Get status of parameter
+/// @param i :: Index of a parameter.
+/// @return Parameter status.
+IFunction::ParameterStatus ParamFunction::getParameterStatus(size_t i) const {
+  checkParameterIndex(i);
+  return m_parameterStatus[i];
 }
 
 /// Get the address of the parameter
 /// @param i :: the index of the parameter required
 /// @returns the address of the parameter
 double *ParamFunction::getParameterAddress(size_t i) {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return &m_parameters[i];
 }
 
 /// Checks if a parameter has been set explicitly
 bool ParamFunction::isExplicitlySet(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return m_explicitlySet[i];
 }
 
@@ -439,8 +260,8 @@ bool ParamFunction::isExplicitlySet(size_t i) const {
  * @return Parameter index or number of nParams() if parameter not found
  */
 size_t ParamFunction::getParameterIndex(const ParameterReference &ref) const {
-  if (ref.getFunction() == this && ref.getIndex() < nParams()) {
-    return ref.getIndex();
+  if (ref.getLocalFunction() == this && ref.getLocalIndex() < nParams()) {
+    return ref.getLocalIndex();
   }
   return nParams();
 }
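
// The repeated range checks removed above are folded into a single
// checkParameterIndex(i) call. The helper itself is not part of this diff
// (it presumably lives in ParamFunction.h); a plausible shape, written here
// as a free function taking the size explicitly, would be:
#include <cstddef>
#include <stdexcept>

inline void checkParameterIndex(std::size_t i, std::size_t nParams) {
  if (i >= nParams)
    throw std::out_of_range("ParamFunction parameter index out of range.");
}
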
diff --git a/Framework/API/src/ParameterReference.cpp b/Framework/API/src/ParameterReference.cpp
index d0b8cc52d5c45c3e387235ffba83cd936c9a9755..979289f8a31dba2bd663761d71f987bba9b53fd5 100644
--- a/Framework/API/src/ParameterReference.cpp
+++ b/Framework/API/src/ParameterReference.cpp
@@ -6,7 +6,7 @@ namespace API {
 
 /// Default constructor
 ParameterReference::ParameterReference()
-    : m_function(), m_index(0), m_isDefault(false) {}
+    : m_owner(), m_function(), m_index(0), m_isDefault(false) {}
 
 /**
  * Constructor.
@@ -17,15 +17,26 @@ ParameterReference::ParameterReference()
  *  a tie or a constraint.
  */
 ParameterReference::ParameterReference(IFunction *fun, std::size_t index,
-                                       bool isDefault) {
+                                       bool isDefault)
+    : m_owner(fun), m_function(fun), m_index(index), m_isDefault(isDefault) {
   reset(fun, index, isDefault);
 }
 
-/// Return pointer to the function
-IFunction *ParameterReference::getFunction() const { return m_function; }
+/// Return pointer to the local function
+IFunction *ParameterReference::getLocalFunction() const { return m_function; }
 
-/// Return parameter index in that function
-std::size_t ParameterReference::getIndex() const { return m_index; }
+/// Return parameter index in the local function
+std::size_t ParameterReference::getLocalIndex() const { return m_index; }
+
+/// Return parameter index in the owning function
+std::size_t ParameterReference::parameterIndex() const {
+  return m_owner->getParameterIndex(*this);
+}
+
+/// Return parameter name in the owning function
+std::string ParameterReference::parameterName() const {
+  return m_owner->parameterName(parameterIndex());
+}
 
 /**
  * Reset the reference
@@ -37,6 +48,7 @@ std::size_t ParameterReference::getIndex() const { return m_index; }
  */
 void ParameterReference::reset(IFunction *fun, std::size_t index,
                                bool isDefault) {
+  m_owner = fun;
   IFunction *fLocal = fun;
   size_t iLocal = index;
   CompositeFunction *cf = dynamic_cast<CompositeFunction *>(fun);
@@ -56,9 +68,12 @@ void ParameterReference::reset(IFunction *fun, std::size_t index,
 /**
  * Set the parameter
  * @param value :: A value to set.
+ * @param isExplicitlySet :: Flag indicating that the user explicitly set
+ * this parameter.
  */
-void ParameterReference::setParameter(const double &value) {
-  m_function->setParameter(m_index, value);
+void ParameterReference::setParameter(const double &value,
+                                      bool isExplicitlySet) {
+  m_function->setParameter(m_index, value, isExplicitlySet);
 }
 
 /// Get the value of the parameter
@@ -69,5 +84,27 @@ double ParameterReference::getParameter() const {
 /// Returns the default value flag
 bool ParameterReference::isDefault() const { return m_isDefault; }
 
+/// Find out if this reference refers to a parameter of the given function,
+/// either directly or via a member of a composite function.
+/// @param fun :: A function to check.
+bool ParameterReference::isParameterOf(const IFunction *fun) const {
+  if (fun == m_function) {
+    return true;
+  }
+  auto fLocal = m_function;
+  size_t iLocal = m_index;
+  auto cf = dynamic_cast<const CompositeFunction *>(m_function);
+  while (cf) {
+    size_t iFun = cf->functionIndex(iLocal);
+    fLocal = cf->getFunction(iFun).get();
+    if (fLocal == fun) {
+      return true;
+    }
+    iLocal = fLocal->parameterIndex(cf->parameterLocalName(iLocal));
+    cf = dynamic_cast<CompositeFunction *>(fLocal);
+  }
+  return false;
+}
+
 } // namespace API
 } // namespace Mantid
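
// Sketch of the owner/local split formalised above: a reference remembers the
// leaf ("local") function and the index inside it, while the owning (possibly
// composite) function maps that back to its own flat index, in the spirit of
// parameterIndex() calling m_owner->getParameterIndex(*this). ToyComposite is
// a stand-in used only to show the offset arithmetic, not the Mantid class.
#include <cstddef>
#include <vector>

struct ToyLeaf {
  std::size_t nParams;
};

struct ToyComposite {
  std::vector<ToyLeaf> members;

  // Members' parameters are concatenated in order, so the flat index is the
  // sum of the sizes of all preceding members plus the local index.
  std::size_t flatIndex(std::size_t memberIndex, std::size_t localIndex) const {
    std::size_t offset = 0;
    for (std::size_t m = 0; m < memberIndex; ++m)
      offset += members[m].nParams;
    return offset + localIndex;
  }
};
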
diff --git a/Framework/API/src/ParameterTie.cpp b/Framework/API/src/ParameterTie.cpp
index bbe6b9e8dcc8c0aa8eac601325644cc7b52f4d8f..9717ee767b2cc021c699b1458f7d77f45da076a8 100644
--- a/Framework/API/src/ParameterTie.cpp
+++ b/Framework/API/src/ParameterTie.cpp
@@ -115,8 +115,8 @@ double ParameterTie::eval() {
       *(it->first) = it->second.getParameter();
     }
     res = m_parser->Eval();
-  } catch (...) {
-    throw std::runtime_error("Error in expresseion");
+  } catch (mu::ParserError &e) {
+    throw std::runtime_error("Error in expression: " + e.GetMsg());
   }
 
   setParameter(res);
@@ -177,11 +177,10 @@ std::string ParameterTie::asString(const IFunction *fun) const {
  */
 bool ParameterTie::findParametersOf(const IFunction *fun) const {
   for (const auto &varPair : m_varMap) {
-    if (varPair.second.getFunction() == fun) {
+    if (varPair.second.isParameterOf(fun)) {
       return true;
     }
   }
-
   return false;
 }
 
diff --git a/Framework/API/src/NearestNeighbourInfo.cpp b/Framework/API/src/WorkspaceNearestNeighbourInfo.cpp
similarity index 66%
rename from Framework/API/src/NearestNeighbourInfo.cpp
rename to Framework/API/src/WorkspaceNearestNeighbourInfo.cpp
index 7612bddd1e6a909f9af5e9dfe101c98d240b3f6a..2dca965546adb83e31c535b7ac7f189dfef1fd85 100644
--- a/Framework/API/src/NearestNeighbourInfo.cpp
+++ b/Framework/API/src/WorkspaceNearestNeighbourInfo.cpp
@@ -1,35 +1,35 @@
-#include "MantidAPI/NearestNeighbourInfo.h"
-#include "MantidAPI/NearestNeighbours.h"
+#include "MantidAPI/WorkspaceNearestNeighbourInfo.h"
+#include "MantidAPI/WorkspaceNearestNeighbours.h"
 #include "MantidAPI/MatrixWorkspace.h"
 #include "MantidKernel/make_unique.h"
 
 namespace Mantid {
 namespace API {
 
-/** Creates NearestNeighbourInfo.
+/** Creates WorkspaceNearestNeighbourInfo.
 *
 * @param workspace :: Reference to workspace providing instrument and
 * spectrum-detector mapping
 * @param ignoreMaskedDetectors :: if true, masked detectors are ignored
 * @param nNeighbours :: number of neighbours to include
 */
-NearestNeighbourInfo::NearestNeighbourInfo(const MatrixWorkspace &workspace,
-                                           const bool ignoreMaskedDetectors,
-                                           const int nNeighbours)
+WorkspaceNearestNeighbourInfo::WorkspaceNearestNeighbourInfo(
+    const MatrixWorkspace &workspace, const bool ignoreMaskedDetectors,
+    const int nNeighbours)
     : m_workspace(workspace) {
   std::vector<specnum_t> spectrumNumbers;
   for (size_t i = 0; i < m_workspace.getNumberHistograms(); ++i)
     spectrumNumbers.push_back(m_workspace.getSpectrum(i).getSpectrumNo());
 
-  m_nearestNeighbours = Kernel::make_unique<NearestNeighbours>(
+  m_nearestNeighbours = Kernel::make_unique<WorkspaceNearestNeighbours>(
       nNeighbours, workspace.spectrumInfo(), std::move(spectrumNumbers),
       ignoreMaskedDetectors);
 }
 
 // Defined as default in source for forward declaration with std::unique_ptr.
-NearestNeighbourInfo::~NearestNeighbourInfo() = default;
+WorkspaceNearestNeighbourInfo::~WorkspaceNearestNeighbourInfo() = default;
 
-/** Queries the NearestNeighbours object for the selected detector.
+/** Queries the WorkspaceNearestNeighbours object for the selected detector.
 * NOTE! getNeighbours(spectrumNumber, radius) is MUCH faster.
 *
 * @param comp :: pointer to the querying detector
@@ -37,8 +37,8 @@ NearestNeighbourInfo::~NearestNeighbourInfo() = default;
 * @return map of DetectorID to distance for the nearest neighbours
 */
 std::map<specnum_t, Kernel::V3D>
-NearestNeighbourInfo::getNeighbours(const Geometry::IDetector *comp,
-                                    const double radius) const {
+WorkspaceNearestNeighbourInfo::getNeighbours(const Geometry::IDetector *comp,
+                                             const double radius) const {
   // Find the spectrum number
   std::vector<specnum_t> spectra = m_workspace.getSpectraFromDetectorIDs(
       std::vector<detid_t>(1, comp->getID()));
@@ -51,24 +51,27 @@ NearestNeighbourInfo::getNeighbours(const Geometry::IDetector *comp,
   return m_nearestNeighbours->neighboursInRadius(spectra[0], radius);
 }
 
-/** Queries the NearestNeighbours object for the selected spectrum number.
+/** Queries the WorkspaceNearestNeighbours object for the selected spectrum
+* number.
 *
 * @param spec :: spectrum number of the detector you are looking at
 * @param radius :: distance from detector on which to filter results
 * @return map of DetectorID to distance for the nearest neighbours
 */
 std::map<specnum_t, Kernel::V3D>
-NearestNeighbourInfo::getNeighbours(specnum_t spec, const double radius) const {
+WorkspaceNearestNeighbourInfo::getNeighbours(specnum_t spec,
+                                             const double radius) const {
   return m_nearestNeighbours->neighboursInRadius(spec, radius);
 }
 
-/** Queries the NearestNeighbours object for the selected spectrum number.
+/** Queries the WorkspaceNearestNeighbours object for the selected spectrum
+* number.
 *
 * @param spec :: spectrum number of the detector you are looking at
 * @return map of DetectorID to distance for the nearest neighbours
 */
 std::map<specnum_t, Kernel::V3D>
-NearestNeighbourInfo::getNeighboursExact(specnum_t spec) const {
+WorkspaceNearestNeighbourInfo::getNeighboursExact(specnum_t spec) const {
   return m_nearestNeighbours->neighbours(spec);
 }
 
diff --git a/Framework/API/src/NearestNeighbours.cpp b/Framework/API/src/WorkspaceNearestNeighbours.cpp
similarity index 90%
rename from Framework/API/src/NearestNeighbours.cpp
rename to Framework/API/src/WorkspaceNearestNeighbours.cpp
index 9f094269ade399b5944cac26ba3f19f0ada12663..de2b5ad4d0c0be602ffb268ce0a5c535826e188f 100644
--- a/Framework/API/src/NearestNeighbours.cpp
+++ b/Framework/API/src/WorkspaceNearestNeighbours.cpp
@@ -1,4 +1,4 @@
-#include "MantidAPI/NearestNeighbours.h"
+#include "MantidAPI/WorkspaceNearestNeighbours.h"
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidGeometry/Instrument.h"
 #include "MantidGeometry/Instrument/DetectorGroup.h"
@@ -24,10 +24,9 @@ using Kernel::V3D;
  * @param ignoreMaskedDetectors :: flag indicating that masked detectors should
  * be ignored.
  */
-NearestNeighbours::NearestNeighbours(int nNeighbours,
-                                     const SpectrumInfo &spectrumInfo,
-                                     std::vector<specnum_t> spectrumNumbers,
-                                     bool ignoreMaskedDetectors)
+WorkspaceNearestNeighbours::WorkspaceNearestNeighbours(
+    int nNeighbours, const SpectrumInfo &spectrumInfo,
+    std::vector<specnum_t> spectrumNumbers, bool ignoreMaskedDetectors)
     : m_spectrumInfo(spectrumInfo),
       m_spectrumNumbers(std::move(spectrumNumbers)),
       m_noNeighbours(nNeighbours), m_cutoff(-DBL_MAX), m_radius(0),
@@ -42,7 +41,7 @@ NearestNeighbours::NearestNeighbours(int nNeighbours,
  * @return map of Detector ID's to distance
  */
 std::map<specnum_t, V3D>
-NearestNeighbours::neighbours(const specnum_t spectrum) const {
+WorkspaceNearestNeighbours::neighbours(const specnum_t spectrum) const {
   return defaultNeighbours(spectrum);
 }
 
@@ -55,8 +54,8 @@ NearestNeighbours::neighbours(const specnum_t spectrum) const {
  * @throw NotFoundError if component is not recognised as a detector
  */
 std::map<specnum_t, V3D>
-NearestNeighbours::neighboursInRadius(const specnum_t spectrum,
-                                      const double radius) const {
+WorkspaceNearestNeighbours::neighboursInRadius(const specnum_t spectrum,
+                                               const double radius) const {
   // If the radius is stupid then don't let it continue as well be stuck forever
   if (radius < 0.0 || radius > 10.0) {
     throw std::invalid_argument(
@@ -71,7 +70,7 @@ NearestNeighbours::neighboursInRadius(const specnum_t spectrum,
       // moment mean that
       // it is necessary.
       // Cast is necessary as the user should see this as a const member
-      const_cast<NearestNeighbours *>(this)->build(eightNearest);
+      const_cast<WorkspaceNearestNeighbours *>(this)->build(eightNearest);
     }
     result = defaultNeighbours(spectrum);
   } else if (radius > m_cutoff && m_radius != radius) {
@@ -79,7 +78,7 @@ NearestNeighbours::neighboursInRadius(const specnum_t spectrum,
     int neighbours = m_noNeighbours + 1;
     while (true) {
       try {
-        const_cast<NearestNeighbours *>(this)->build(neighbours);
+        const_cast<WorkspaceNearestNeighbours *>(this)->build(neighbours);
       } catch (std::invalid_argument &) {
         break;
       }
@@ -109,7 +108,7 @@ NearestNeighbours::neighboursInRadius(const specnum_t spectrum,
  * @param noNeighbours :: The number of nearest neighbours to use to build
  * the graph
  */
-void NearestNeighbours::build(const int noNeighbours) {
+void WorkspaceNearestNeighbours::build(const int noNeighbours) {
   const auto indices = getSpectraDetectors();
   if (indices.empty()) {
     throw std::runtime_error(
@@ -201,7 +200,7 @@ void NearestNeighbours::build(const int noNeighbours) {
  * @throw NotFoundError if detector ID is not recognised
  */
 std::map<specnum_t, V3D>
-NearestNeighbours::defaultNeighbours(const specnum_t spectrum) const {
+WorkspaceNearestNeighbours::defaultNeighbours(const specnum_t spectrum) const {
   auto vertex = m_specToVertex.find(spectrum);
 
   if (vertex != m_specToVertex.end()) {
@@ -224,7 +223,7 @@ NearestNeighbours::defaultNeighbours(const specnum_t spectrum) const {
 }
 
 /// Returns the list of valid spectrum indices
-std::vector<size_t> NearestNeighbours::getSpectraDetectors() {
+std::vector<size_t> WorkspaceNearestNeighbours::getSpectraDetectors() {
   std::vector<size_t> indices;
   for (size_t i = 0; i < m_spectrumNumbers.size(); ++i) {
     // Always ignore monitors and ignore masked detectors if requested.
diff --git a/Framework/API/test/CompositeFunctionTest.h b/Framework/API/test/CompositeFunctionTest.h
index c4cf053d64db1298315186dba12c542302e6fd56..43824cf55ce45a806331233d936d0bce1bffe989 100644
--- a/Framework/API/test/CompositeFunctionTest.h
+++ b/Framework/API/test/CompositeFunctionTest.h
@@ -603,18 +603,18 @@ public:
 
     TS_ASSERT_EQUALS(mfun->nParams(), 12);
 
-    TS_ASSERT_EQUALS(mfun->getParameter(0), 154);
-    TS_ASSERT_EQUALS(mfun->getParameter(1), 77);
-    TS_ASSERT_EQUALS(mfun->getParameter(2), 1.1);
-    TS_ASSERT_EQUALS(mfun->getParameter(3), 1.2);
-    TS_ASSERT_EQUALS(mfun->getParameter(4), 1.65);
-    TS_ASSERT_EQUALS(mfun->getParameter(5), 2.1);
-    TS_ASSERT_EQUALS(mfun->getParameter(6), 2.4 * 2.4);
-    TS_ASSERT_EQUALS(mfun->getParameter(7), sqrt(2.4));
-    TS_ASSERT_EQUALS(mfun->getParameter(8), 2.4);
-    TS_ASSERT_EQUALS(mfun->getParameter(9), 3.1);
-    TS_ASSERT_EQUALS(mfun->getParameter(10), 79.1);
-    TS_ASSERT_EQUALS(mfun->getParameter(11), 3.3);
+    TS_ASSERT_EQUALS(mfun->getParameter("f0.a"), 154);
+    TS_ASSERT_EQUALS(mfun->getParameter("f0.b"), 77);
+    TS_ASSERT_EQUALS(mfun->getParameter("f1.c"), 1.1);
+    TS_ASSERT_EQUALS(mfun->getParameter("f1.h"), 1.2);
+    TS_ASSERT_EQUALS(mfun->getParameter("f1.s"), 1.65);
+    TS_ASSERT_EQUALS(mfun->getParameter("f2.c0"), 2.1);
+    TS_ASSERT_EQUALS(mfun->getParameter("f2.c1"), 2.4 * 2.4);
+    TS_ASSERT_EQUALS(mfun->getParameter("f2.c2"), sqrt(2.4));
+    TS_ASSERT_EQUALS(mfun->getParameter("f2.c3"), 2.4);
+    TS_ASSERT_EQUALS(mfun->getParameter("f3.c"), 3.1);
+    TS_ASSERT_EQUALS(mfun->getParameter("f3.h"), 79.1);
+    TS_ASSERT_EQUALS(mfun->getParameter("f3.s"), 3.3);
 
     delete mfun;
   }
@@ -1073,11 +1073,11 @@ public:
 
     TS_ASSERT_EQUALS(mfun->nParams(), 5);
 
-    TS_ASSERT(!mfun->isFixed(0));
-    TS_ASSERT(mfun->isFixed(1));
-    TS_ASSERT(!mfun->isFixed(2));
-    TS_ASSERT(mfun->isFixed(3));
-    TS_ASSERT(mfun->isFixed(4));
+    TS_ASSERT(mfun->isActive(0));  // f0.a
+    TS_ASSERT(!mfun->isActive(1)); // f0.b
+    TS_ASSERT(mfun->isActive(2));  // f1.c
+    TS_ASSERT(!mfun->isActive(3)); // f1.h
+    TS_ASSERT(mfun->isFixed(4));   // f1.s
 
     mfun->applyTies();
 
@@ -1114,9 +1114,9 @@ public:
 
     TS_ASSERT_EQUALS(mfun->nParams(), 3);
 
-    TS_ASSERT(!mfun->isFixed(0));
-    TS_ASSERT(!mfun->isFixed(1));
-    TS_ASSERT(mfun->isFixed(2));
+    TS_ASSERT(mfun->isActive(0));
+    TS_ASSERT(mfun->isActive(1));
+    TS_ASSERT(!mfun->isActive(2));
 
     mfun->applyTies();
 
diff --git a/Framework/API/test/DetectorSearcherTest.h b/Framework/API/test/DetectorSearcherTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..aae46f50b23b19b171dbcb4d6ac8a374349bdfb3
--- /dev/null
+++ b/Framework/API/test/DetectorSearcherTest.h
@@ -0,0 +1,235 @@
+#ifndef MANTID_API_DETECTORSEARCHERTEST_H_
+#define MANTID_API_DETECTORSEARCHERTEST_H_
+
+#include "MantidAPI/DetectorSearcher.h"
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidAPI/ExperimentInfo.h"
+#include "MantidBeamline/DetectorInfo.h"
+#include "MantidTestHelpers/ComponentCreationHelper.h"
+#include "MantidKernel/V3D.h"
+
+#include <cmath>
+#include <cxxtest/TestSuite.h>
+
+using Mantid::Kernel::V3D;
+using namespace Mantid;
+using namespace Mantid::Geometry;
+using namespace Mantid::API;
+
+class DetectorSearcherTest : public CxxTest::TestSuite {
+public:
+  void test_init() {
+    auto inst1 = ComponentCreationHelper::createTestInstrumentCylindrical(
+        3, V3D(0, 0, -1), V3D(0, 0, 0), 1.6, 1.0);
+    auto inst2 =
+        ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
+
+    ExperimentInfo expInfo1;
+    expInfo1.setInstrument(inst1);
+    ExperimentInfo expInfo2;
+    expInfo2.setInstrument(inst2);
+
+    TS_ASSERT_THROWS_NOTHING(
+        DetectorSearcher searcher(inst1, expInfo1.detectorInfo()))
+    TS_ASSERT_THROWS_NOTHING(
+        DetectorSearcher searcher(inst2, expInfo2.detectorInfo()))
+  }
+
+  void test_search_cylindrical() {
+    auto inst = ComponentCreationHelper::createTestInstrumentCylindrical(
+        3, V3D(0, 0, -1), V3D(0, 0, 0), 1.6, 1.0);
+
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+
+    DetectorSearcher searcher(inst, expInfo.detectorInfo());
+    const auto checkResult = [&searcher](const V3D &q, size_t index) {
+      const auto result = searcher.findDetectorIndex(q);
+      TS_ASSERT(std::get<0>(result))
+      TS_ASSERT_EQUALS(std::get<1>(result), index)
+    };
+
+    checkResult(V3D(0.913156, 0.285361, 0.291059), 0);
+    checkResult(V3D(-6.09343e-17, 0.995133, 0.0985376), 1);
+    checkResult(V3D(-0.913156, 0.285361, 0.291059), 2);
+    checkResult(V3D(0.959758, -1.17536e-16, 0.280828), 3);
+
+    checkResult(V3D(-0.959758, -0, 0.280828), 5);
+    checkResult(V3D(0.913156, -0.285361, 0.291059), 6);
+    checkResult(V3D(-6.09343e-17, -0.995133, 0.0985376), 7);
+    checkResult(V3D(-0.913156, -0.285361, 0.291059), 8);
+    checkResult(V3D(0.942022, 0.294382, 0.161038), 9);
+    checkResult(V3D(-6.11563e-17, 0.998759, 0.0498137), 10);
+    checkResult(V3D(-0.942022, 0.294382, 0.161038), 11);
+    checkResult(V3D(0.988034, -1.20999e-16, 0.154233), 12);
+
+    checkResult(V3D(-0.988034, -0, 0.154233), 14);
+    checkResult(V3D(0.942022, -0.294382, 0.161038), 15);
+    checkResult(V3D(-6.11563e-17, -0.998759, 0.0498137), 16);
+    checkResult(V3D(-0.942022, -0.294382, 0.161038), 17);
+    checkResult(V3D(0.948717, 0.296474, 0.109725), 18);
+    checkResult(V3D(-6.11984e-17, 0.999446, 0.0332779), 19);
+    checkResult(V3D(-0.948717, 0.296474, 0.109725), 20);
+    checkResult(V3D(0.994483, -1.21789e-16, 0.104898), 21);
+
+    checkResult(V3D(-0.994483, -0, 0.104898), 23);
+    checkResult(V3D(0.948717, -0.296474, 0.109725), 24);
+    checkResult(V3D(-6.11984e-17, -0.999446, 0.0332779), 25);
+    checkResult(V3D(-0.948717, -0.296474, 0.109725), 26);
+  }
+
+  void test_invalid_rectangular() {
+    auto inst =
+        ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
+
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+    const auto &info = expInfo.detectorInfo();
+
+    DetectorSearcher searcher(inst, info);
+    const auto resultNull = searcher.findDetectorIndex(V3D(0, 0, 0));
+    TS_ASSERT(!std::get<0>(resultNull))
+
+    const auto resultNaN = searcher.findDetectorIndex(V3D(NAN, NAN, NAN));
+    TS_ASSERT(!std::get<0>(resultNaN))
+  }
+
+  void test_invalid_cylindrical() {
+    auto inst = ComponentCreationHelper::createTestInstrumentCylindrical(
+        3, V3D(0, 0, -1), V3D(0, 0, 0), 1.6, 1.0);
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+    const auto &info = expInfo.detectorInfo();
+
+    DetectorSearcher searcher(inst, info);
+    const auto resultNull = searcher.findDetectorIndex(V3D(0, 0, 0));
+    TS_ASSERT(!std::get<0>(resultNull))
+
+    const auto resultNaN = searcher.findDetectorIndex(V3D(NAN, NAN, NAN));
+    TS_ASSERT(!std::get<0>(resultNaN))
+  }
+
+  void test_search_rectangular() {
+    auto inst =
+        ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+    const auto &info = expInfo.detectorInfo();
+
+    DetectorSearcher searcher(inst, info);
+    const auto checkResult = [&searcher](V3D q, size_t index) {
+      const auto result = searcher.findDetectorIndex(q);
+      TS_ASSERT(std::get<0>(result))
+      TS_ASSERT_EQUALS(std::get<1>(result), index)
+    };
+
+    for (size_t pointNo = 0; pointNo < info.size(); ++pointNo) {
+      const auto &det = info.detector(pointNo);
+      const auto q = convertDetectorPositionToQ(det);
+      checkResult(q, pointNo);
+    }
+  }
+
+  V3D convertDetectorPositionToQ(const IDetector &det) {
+    const auto tt1 = det.getTwoTheta(V3D(0, 0, 0), V3D(0, 0, 1)); // two theta
+    const auto ph1 = det.getPhi();                                // phi
+    auto E1 =
+        V3D(-std::sin(tt1) * std::cos(ph1), -std::sin(tt1) * std::sin(ph1),
+            1. - std::cos(tt1));  // = k_i - k_f for |k| = 1, i.e. the Q direction
+    return E1 * (1. / E1.norm()); // normalize
+  }
+};
+
+class DetectorSearcherTestPerformance : public CxxTest::TestSuite {
+public:
+  void test_rectangular() {
+    auto inst =
+        ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+    const auto &info = expInfo.detectorInfo();
+
+    DetectorSearcher searcher(inst, info);
+
+    std::vector<double> xDirections(100);
+    std::vector<double> yDirections(100);
+    std::vector<double> zDirections(50);
+
+    // create x values starting at -1 in steps of 0.1
+    int index = 0;
+    double startValue = -1;
+    std::generate(
+        xDirections.begin(), xDirections.end(),
+        [&index, &startValue]() { return startValue + index++ * 0.1; });
+
+    // create z values starting at 0.1 in steps of 0.1
+    // ignore negative z values as these are not physical!
+    index = 0;
+    startValue = 0.1;
+    std::generate(
+        zDirections.begin(), zDirections.end(),
+        [&index, &startValue]() { return startValue + index++ * 0.1; });
+
+    yDirections = xDirections;
+
+    size_t hitCount = 0;
+    for (auto &x : xDirections) {
+      for (auto &y : yDirections) {
+        for (auto &z : zDirections) {
+          const auto result = searcher.findDetectorIndex(V3D(x, y, z));
+          if (std::get<0>(result))
+            ++hitCount;
+        }
+      }
+    }
+
+    TS_ASSERT_EQUALS(hitCount, 246)
+  }
+
+  void test_cylindrical() {
+    auto inst = ComponentCreationHelper::createTestInstrumentCylindrical(
+        3, V3D(0, 0, -1), V3D(0, 0, 0), 1.6, 1.0);
+
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+    const auto &info = expInfo.detectorInfo();
+
+    DetectorSearcher searcher(inst, info);
+
+    std::vector<double> xDirections(50);
+    std::vector<double> yDirections(50);
+    std::vector<double> zDirections(50);
+
+    // create x values starting at -1 in steps of 0.1
+    int index = 0;
+    double startValue = -1;
+    std::generate(
+        xDirections.begin(), xDirections.end(),
+        [&index, &startValue]() { return startValue + index++ * 0.1; });
+
+    // create z values starting at 0.1 in steps of 0.1
+    // ignore negative z values as these are not physical!
+    index = 0;
+    startValue = 0.1;
+    std::generate(
+        zDirections.begin(), zDirections.end(),
+        [&index, &startValue]() { return startValue + index++ * 0.1; });
+
+    yDirections = xDirections;
+
+    size_t hitCount = 0;
+    for (auto &x : xDirections) {
+      for (auto &y : yDirections) {
+        for (auto &z : zDirections) {
+          const auto result = searcher.findDetectorIndex(V3D(x, y, z));
+          if (std::get<0>(result))
+            ++hitCount;
+        }
+      }
+    }
+
+    TS_ASSERT_EQUALS(hitCount, 16235)
+  }
+};
+
+#endif
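
// A hedged usage sketch of the API exercised by the tests above: the searcher
// is built from an instrument and its DetectorInfo, and findDetectorIndex
// returns a std::tuple<bool, size_t> whose first element says whether any
// detector intercepted the given direction. Everything used below appears in
// the test file itself; nothing beyond that is assumed.
#include "MantidAPI/DetectorSearcher.h"
#include "MantidAPI/ExperimentInfo.h"
#include "MantidKernel/V3D.h"
#include "MantidTestHelpers/ComponentCreationHelper.h"

#include <tuple>

bool directionHitsTestInstrument(const Mantid::Kernel::V3D &q) {
  auto inst = ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
  Mantid::API::ExperimentInfo expInfo;
  expInfo.setInstrument(inst);

  Mantid::API::DetectorSearcher searcher(inst, expInfo.detectorInfo());
  const auto result = searcher.findDetectorIndex(q);
  return std::get<0>(result); // detector index is in std::get<1>(result)
}
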
diff --git a/Framework/API/test/FunctionTest.h b/Framework/API/test/FunctionTest.h
index 58c89921caf70680c5b55cda7df1e1cbd0d6a195..e37da29efceb22330079dc8592e81c002811217d 100644
--- a/Framework/API/test/FunctionTest.h
+++ b/Framework/API/test/FunctionTest.h
@@ -269,7 +269,8 @@ public:
     TS_ASSERT(!f.isFixed(0));
     TS_ASSERT(f.isFixed(1));
     TS_ASSERT(!f.isFixed(2));
-    TS_ASSERT(f.isFixed(3));
+    TS_ASSERT(!f.isFixed(3));
+    TS_ASSERT(!f.isActive(3));
 
     TS_ASSERT(f.isActive(0));
     TS_ASSERT(!f.isActive(1));
@@ -334,9 +335,12 @@ public:
     TS_ASSERT_EQUALS(f.getParameter("c3"), 3.3);
 
     TS_ASSERT(!f.isFixed(0));
-    TS_ASSERT(f.isFixed(1));
+    TS_ASSERT(!f.isFixed(1));
+    TS_ASSERT(!f.isActive(1));
     TS_ASSERT(!f.isFixed(2));
+    TS_ASSERT(f.isActive(2));
     TS_ASSERT(!f.isFixed(3));
+    TS_ASSERT(f.isActive(3));
 
     TS_ASSERT(!f.getTie(0));
     TS_ASSERT(f.getTie(1) && !f.getTie(1)->isDefault());
diff --git a/Framework/API/test/ImmutableCompositeFunctionTest.h b/Framework/API/test/ImmutableCompositeFunctionTest.h
index e15872611464696244eb74df62828001e803c68d..d97de5ba69c74d0cbe143b212995aa5181ec9637 100644
--- a/Framework/API/test/ImmutableCompositeFunctionTest.h
+++ b/Framework/API/test/ImmutableCompositeFunctionTest.h
@@ -279,9 +279,13 @@ public:
     icf.addTies("b2=b1,a2=a1/5");
     icf.applyTies();
 
-    TS_ASSERT_EQUALS(icf.asString(), "name=ImmutableCompositeFunctionTest_"
-                                     "Function,NumDeriv=false,a1=11,b1=12,a2=2."
-                                     "2,b2=12,ties=(a2=a1/5,b2=b1)");
+    auto icfString = icf.asString();
+    TS_ASSERT_EQUALS(icfString.substr(0, 91),
+                     "name=ImmutableCompositeFunctionTest_"
+                     "Function,NumDeriv=false,a1=11,b1=12,a2=2.2,b2=12,ties=(");
+    auto icfTies = icfString.substr(91);
+    TS_ASSERT(icfTies.find("a2=a1/5") != std::string::npos)
+    TS_ASSERT(icfTies.find("b2=b1") != std::string::npos)
 
     auto fun = FunctionFactory::Instance().createInitialized(icf.asString());
     TS_ASSERT(fun);
diff --git a/Framework/API/test/ParameterReferenceTest.h b/Framework/API/test/ParameterReferenceTest.h
index c7574f73382d077f72434582b4f891cbf3c7cb73..188dfefd7ba04759a6d8d23b76371389016b575d 100644
--- a/Framework/API/test/ParameterReferenceTest.h
+++ b/Framework/API/test/ParameterReferenceTest.h
@@ -86,37 +86,37 @@ public:
     TS_ASSERT_EQUALS(f1->getContainingFunction(r12), f1_2);
     TS_ASSERT_EQUALS(f1_2->getContainingFunction(r12), f1_2_1);
 
-    TS_ASSERT_EQUALS(r0.getFunction(), f0.get());
-    TS_ASSERT_EQUALS(r1.getFunction(), f0.get());
-    TS_ASSERT_EQUALS(r2.getFunction(), f0.get());
+    TS_ASSERT_EQUALS(r0.getLocalFunction(), f0.get());
+    TS_ASSERT_EQUALS(r1.getLocalFunction(), f0.get());
+    TS_ASSERT_EQUALS(r2.getLocalFunction(), f0.get());
 
-    TS_ASSERT_EQUALS(r0.getIndex(), 0);
-    TS_ASSERT_EQUALS(r1.getIndex(), 1);
-    TS_ASSERT_EQUALS(r2.getIndex(), 2);
+    TS_ASSERT_EQUALS(r0.getLocalIndex(), 0);
+    TS_ASSERT_EQUALS(r1.getLocalIndex(), 1);
+    TS_ASSERT_EQUALS(r2.getLocalIndex(), 2);
 
-    TS_ASSERT_EQUALS(r3.getFunction(), f1_0.get());
-    TS_ASSERT_EQUALS(r4.getFunction(), f1_0.get());
-    TS_ASSERT_EQUALS(r5.getFunction(), f1_0.get());
+    TS_ASSERT_EQUALS(r3.getLocalFunction(), f1_0.get());
+    TS_ASSERT_EQUALS(r4.getLocalFunction(), f1_0.get());
+    TS_ASSERT_EQUALS(r5.getLocalFunction(), f1_0.get());
 
-    TS_ASSERT_EQUALS(r3.getIndex(), 0);
-    TS_ASSERT_EQUALS(r4.getIndex(), 1);
-    TS_ASSERT_EQUALS(r5.getIndex(), 2);
+    TS_ASSERT_EQUALS(r3.getLocalIndex(), 0);
+    TS_ASSERT_EQUALS(r4.getLocalIndex(), 1);
+    TS_ASSERT_EQUALS(r5.getLocalIndex(), 2);
 
-    TS_ASSERT_EQUALS(r6.getFunction(), f1_1.get());
-    TS_ASSERT_EQUALS(r7.getFunction(), f1_1.get());
-    TS_ASSERT_EQUALS(r8.getFunction(), f1_1.get());
+    TS_ASSERT_EQUALS(r6.getLocalFunction(), f1_1.get());
+    TS_ASSERT_EQUALS(r7.getLocalFunction(), f1_1.get());
+    TS_ASSERT_EQUALS(r8.getLocalFunction(), f1_1.get());
 
-    TS_ASSERT_EQUALS(r6.getIndex(), 0);
-    TS_ASSERT_EQUALS(r7.getIndex(), 1);
-    TS_ASSERT_EQUALS(r8.getIndex(), 2);
+    TS_ASSERT_EQUALS(r6.getLocalIndex(), 0);
+    TS_ASSERT_EQUALS(r7.getLocalIndex(), 1);
+    TS_ASSERT_EQUALS(r8.getLocalIndex(), 2);
 
-    TS_ASSERT_EQUALS(r9.getFunction(), f1_2_0.get());
-    TS_ASSERT_EQUALS(r10.getFunction(), f1_2_0.get());
-    TS_ASSERT_EQUALS(r11.getFunction(), f1_2_0.get());
+    TS_ASSERT_EQUALS(r9.getLocalFunction(), f1_2_0.get());
+    TS_ASSERT_EQUALS(r10.getLocalFunction(), f1_2_0.get());
+    TS_ASSERT_EQUALS(r11.getLocalFunction(), f1_2_0.get());
 
-    TS_ASSERT_EQUALS(r9.getIndex(), 0);
-    TS_ASSERT_EQUALS(r10.getIndex(), 1);
-    TS_ASSERT_EQUALS(r11.getIndex(), 2);
+    TS_ASSERT_EQUALS(r9.getLocalIndex(), 0);
+    TS_ASSERT_EQUALS(r10.getLocalIndex(), 1);
+    TS_ASSERT_EQUALS(r11.getLocalIndex(), 2);
 
     delete cf;
   }
diff --git a/Framework/API/test/ParameterTieTest.h b/Framework/API/test/ParameterTieTest.h
index 15544970263f2b58c982d2a215c928c6f4434a17..1cb68a7645206f0dc5b8dbb53cd7dd401931380d 100644
--- a/Framework/API/test/ParameterTieTest.h
+++ b/Framework/API/test/ParameterTieTest.h
@@ -118,8 +118,8 @@ public:
     TS_ASSERT_EQUALS(tie.asString(&mfun), "f1.sig=f2.sig^2+f0.a+1");
 
     TS_ASSERT_DELTA(tie.eval(), 5.8, 0.00001);
-    TS_ASSERT_EQUALS(tie.getFunction(), g1.get());
-    TS_ASSERT_EQUALS(tie.getIndex(), 2);
+    TS_ASSERT_EQUALS(tie.getLocalFunction(), g1.get());
+    TS_ASSERT_EQUALS(tie.getLocalIndex(), 2);
 
     TS_ASSERT_THROWS(mustThrow1(&mfun), std::invalid_argument);
     TS_ASSERT_THROWS(mustThrow2(&mfun), std::invalid_argument);
@@ -144,8 +144,8 @@ public:
     TS_ASSERT_EQUALS(tie.asString(&mfun), "f0.b=f3.sig^2+f1.a+1");
 
     TS_ASSERT_DELTA(tie.eval(), 2, 0.00001);
-    TS_ASSERT_EQUALS(tie.getFunction(), bk1.get());
-    TS_ASSERT_EQUALS(tie.getIndex(), 1);
+    TS_ASSERT_EQUALS(tie.getLocalFunction(), bk1.get());
+    TS_ASSERT_EQUALS(tie.getLocalIndex(), 1);
 
     mfun.removeFunction(2);
     TS_ASSERT_EQUALS(tie.asString(&mfun), "f0.b=f2.sig^2+f1.a+1");
@@ -213,7 +213,7 @@ public:
 
     ParameterTie tie(&bk, "b", "2*a-1");
 
-    TS_ASSERT_EQUALS(tie.getIndex(), 1);
+    TS_ASSERT_EQUALS(tie.getLocalIndex(), 1);
     TS_ASSERT_DELTA(tie.eval(), 0.6, 0.00001);
     TS_ASSERT_THROWS(mustThrow4(&bk), std::invalid_argument);
     TS_ASSERT_THROWS(mustThrow5(&bk), std::invalid_argument);
diff --git a/Framework/API/test/NearestNeighbourInfoTest.h b/Framework/API/test/WorkspaceNearestNeighbourInfoTest.h
similarity index 68%
rename from Framework/API/test/NearestNeighbourInfoTest.h
rename to Framework/API/test/WorkspaceNearestNeighbourInfoTest.h
index f8fa58c3e4d72ae8275216a845528f2a378fe4d9..f74e380af23ee0955910c731cf2d5bc03f315562 100644
--- a/Framework/API/test/NearestNeighbourInfoTest.h
+++ b/Framework/API/test/WorkspaceNearestNeighbourInfoTest.h
@@ -5,21 +5,23 @@
 
 #include "MantidTestHelpers/FakeObjects.h"
 #include "MantidTestHelpers/InstrumentCreationHelper.h"
-#include "MantidAPI/NearestNeighbourInfo.h"
+#include "MantidAPI/WorkspaceNearestNeighbourInfo.h"
 #include "MantidAPI/SpectrumInfo.h"
 
-using Mantid::API::NearestNeighbourInfo;
+using Mantid::API::WorkspaceNearestNeighbourInfo;
 
-class NearestNeighbourInfoTest : public CxxTest::TestSuite {
+class WorkspaceNearestNeighbourInfoTest : public CxxTest::TestSuite {
 public:
   // This pair of boilerplate methods prevent the suite being created statically
   // This means the constructor isn't called when running other tests
-  static NearestNeighbourInfoTest *createSuite() {
-    return new NearestNeighbourInfoTest();
+  static WorkspaceNearestNeighbourInfoTest *createSuite() {
+    return new WorkspaceNearestNeighbourInfoTest();
+  }
+  static void destroySuite(WorkspaceNearestNeighbourInfoTest *suite) {
+    delete suite;
   }
-  static void destroySuite(NearestNeighbourInfoTest *suite) { delete suite; }
 
-  NearestNeighbourInfoTest() {
+  WorkspaceNearestNeighbourInfoTest() {
     workspace.initialize(100, 1, 1);
     InstrumentCreationHelper::addFullInstrumentToWorkspace(workspace, false,
                                                            false, "");
@@ -29,15 +31,15 @@ public:
   }
 
   void test_construct() {
-    TS_ASSERT_THROWS_NOTHING(NearestNeighbourInfo(workspace, false));
+    TS_ASSERT_THROWS_NOTHING(WorkspaceNearestNeighbourInfo(workspace, false));
   }
 
   void test_neighbourCount() {
     // No detailed test, just checking if parameters are passed on to
     // NearestNeighbours correctly.
-    NearestNeighbourInfo nn2(workspace, false, 2);
+    WorkspaceNearestNeighbourInfo nn2(workspace, false, 2);
     TS_ASSERT_EQUALS(nn2.getNeighboursExact(3).size(), 2);
-    NearestNeighbourInfo nn4(workspace, false, 4);
+    WorkspaceNearestNeighbourInfo nn4(workspace, false, 4);
     const auto neighbours = nn4.getNeighboursExact(3);
     TS_ASSERT_EQUALS(neighbours.size(), 4);
     TS_ASSERT_EQUALS(neighbours.count(1), 1);
@@ -46,9 +48,9 @@ public:
   void test_neighbourCount_ignoreMasked() {
     // No detailed test, just checking if parameters are passed on to
     // NearestNeighbours correctly.
-    NearestNeighbourInfo nn2(workspace, true, 2);
+    WorkspaceNearestNeighbourInfo nn2(workspace, true, 2);
     TS_ASSERT_EQUALS(nn2.getNeighboursExact(3).size(), 2);
-    NearestNeighbourInfo nn4(workspace, true, 4);
+    WorkspaceNearestNeighbourInfo nn4(workspace, true, 4);
     const auto neighbours = nn4.getNeighboursExact(3);
     TS_ASSERT_EQUALS(neighbours.size(), 4);
     TS_ASSERT_EQUALS(neighbours.count(1), 0);
diff --git a/Framework/API/test/NearestNeighboursTest.h b/Framework/API/test/WorkspaceNearestNeighboursTest.h
similarity index 89%
rename from Framework/API/test/NearestNeighboursTest.h
rename to Framework/API/test/WorkspaceNearestNeighboursTest.h
index 0369aaacabbf069c5fff98582b8e0df241163e7e..22320d7276a1bb874c3d0c06da4bae845f63bd0d 100644
--- a/Framework/API/test/NearestNeighboursTest.h
+++ b/Framework/API/test/WorkspaceNearestNeighboursTest.h
@@ -1,7 +1,7 @@
 #ifndef MANTID_TEST_GEOMETRY_NEARESTNEIGHBOURS
 #define MANTID_TEST_GEOMETRY_NEARESTNEIGHBOURS
 
-#include "MantidAPI/NearestNeighbours.h"
+#include "MantidAPI/WorkspaceNearestNeighbours.h"
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidGeometry/IDetector.h"
 #include "MantidGeometry/Instrument/Detector.h"
@@ -46,20 +46,22 @@ std::vector<specnum_t> getSpectrumNumbers(const MatrixWorkspace &workspace) {
 //=====================================================================================
 // Functional tests
 //=====================================================================================
-class NearestNeighboursTest : public CxxTest::TestSuite {
+class WorkspaceNearestNeighboursTest : public CxxTest::TestSuite {
 private:
   /// Helper type giving access to protected methods. Makes testing of NN
   /// internals possible.
-  class ExposedNearestNeighbours : public Mantid::API::NearestNeighbours {
+  class ExposedNearestNeighbours
+      : public Mantid::API::WorkspaceNearestNeighbours {
   public:
     ExposedNearestNeighbours(const SpectrumInfo &spectrumInfo,
                              const std::vector<specnum_t> spectrumNumbers,
                              bool ignoreMasked = false)
-        : NearestNeighbours(8, spectrumInfo, spectrumNumbers, ignoreMasked) {}
+        : WorkspaceNearestNeighbours(8, spectrumInfo, spectrumNumbers,
+                                     ignoreMasked) {}
 
     // Direct access to intermdiate spectra detectors
     std::vector<size_t> getSpectraDetectors() {
-      return NearestNeighbours::getSpectraDetectors();
+      return WorkspaceNearestNeighbours::getSpectraDetectors();
     }
   };
 
@@ -71,8 +73,8 @@ public:
         ComponentCreationHelper::createTestInstrumentCylindrical(2));
 
     // Create the NearestNeighbours object directly.
-    NearestNeighbours nn(actualNeighboursNumber, ws->spectrumInfo(),
-                         getSpectrumNumbers(*ws));
+    WorkspaceNearestNeighbours nn(actualNeighboursNumber, ws->spectrumInfo(),
+                                  getSpectrumNumbers(*ws));
 
     // Check distances calculated in NearestNeighbours compare with those using
     // getDistance on component
@@ -88,7 +90,8 @@ public:
         ComponentCreationHelper::createTestInstrumentCylindrical(2));
 
     // Create the NearestNeighbours object directly.
-    NearestNeighbours nn(8, ws->spectrumInfo(), getSpectrumNumbers(*ws));
+    WorkspaceNearestNeighbours nn(8, ws->spectrumInfo(),
+                                  getSpectrumNumbers(*ws));
 
     detid2det_map m_detectors;
     ws->getInstrument()->getDetectors(m_detectors);
@@ -144,7 +147,8 @@ public:
         ComponentCreationHelper::createTestInstrumentRectangular(2, 16));
 
     // Create the NearestNeighbours object directly.
-    NearestNeighbours nn(8, ws->spectrumInfo(), getSpectrumNumbers(*ws));
+    WorkspaceNearestNeighbours nn(8, ws->spectrumInfo(),
+                                  getSpectrumNumbers(*ws));
 
     const auto &m_instrument = ws->getInstrument();
     // Correct # of detectors
@@ -209,7 +213,8 @@ public:
         ComponentCreationHelper::createTestInstrumentCylindrical(2));
 
     // Create the NearestNeighbours object directly.
-    NearestNeighbours nn(8, ws->spectrumInfo(), getSpectrumNumbers(*ws));
+    WorkspaceNearestNeighbours nn(8, ws->spectrumInfo(),
+                                  getSpectrumNumbers(*ws));
     for (size_t i = 0; i < 2000; i++) {
       nn.neighboursInRadius(1, 5.0);
     }
@@ -224,7 +229,7 @@ public:
     const auto &spectrumInfo = ws->spectrumInfo();
     const auto spectrumNumbers = getSpectrumNumbers(*ws);
     for (size_t i = 0; i < 2000; i++) {
-      NearestNeighbours nn(8, spectrumInfo, spectrumNumbers);
+      WorkspaceNearestNeighbours nn(8, spectrumInfo, spectrumNumbers);
       nn.neighbours(1);
     }
   }
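The renamed class keeps the same interface; a minimal sketch of the calls exercised in the tests above (the workspace setup and the getSpectrumNumbers() helper are assumed to be as in the test):

#include "MantidAPI/WorkspaceNearestNeighbours.h"

// Sketch only: `ws` is a MatrixWorkspace_sptr with an instrument attached.
using Mantid::API::WorkspaceNearestNeighbours;

WorkspaceNearestNeighbours nn(8, ws->spectrumInfo(), getSpectrumNumbers(*ws));

// Eight nearest neighbours of spectrum 1 ...
const auto nearest = nn.neighbours(1);
// ... or every neighbour within a radius of 5.0 of spectrum 1.
const auto withinRadius = nn.neighboursInRadius(1, 5.0);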
diff --git a/Framework/Algorithms/CMakeLists.txt b/Framework/Algorithms/CMakeLists.txt
index 21ad795708b779ff06db54f13a9a728c1dec5e9b..a770b9938ec271a77efe8d055cbe4c3db9482a0e 100644
--- a/Framework/Algorithms/CMakeLists.txt
+++ b/Framework/Algorithms/CMakeLists.txt
@@ -131,6 +131,7 @@ set ( SRC_FILES
 	src/FindCenterOfMassPosition2.cpp
 	src/FindDeadDetectors.cpp
 	src/FindDetectorsOutsideLimits.cpp
+	src/FindEPP.cpp
 	src/FindPeakBackground.cpp
 	src/FindPeaks.cpp
 	src/FitPeak.cpp
@@ -450,6 +451,7 @@ set ( INC_FILES
 	inc/MantidAlgorithms/FindCenterOfMassPosition2.h
 	inc/MantidAlgorithms/FindDeadDetectors.h
 	inc/MantidAlgorithms/FindDetectorsOutsideLimits.h
+	inc/MantidAlgorithms/FindEPP.h
 	inc/MantidAlgorithms/FindPeakBackground.h
 	inc/MantidAlgorithms/FindPeaks.h
 	inc/MantidAlgorithms/FitPeak.h
@@ -777,6 +779,7 @@ set ( TEST_FILES
 	FindCenterOfMassPositionTest.h
 	FindDeadDetectorsTest.h
 	FindDetectorsOutsideLimitsTest.h
+	FindEPPTest.h
 	FindPeakBackgroundTest.h
 	FindPeaksTest.h
 	FitPeakTest.h
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/FindEPP.h b/Framework/Algorithms/inc/MantidAlgorithms/FindEPP.h
new file mode 100644
index 0000000000000000000000000000000000000000..61ba47b88b95c0e1d38432c0232996ae85def098
--- /dev/null
+++ b/Framework/Algorithms/inc/MantidAlgorithms/FindEPP.h
@@ -0,0 +1,57 @@
+#ifndef MANTID_ALGORITHMS_FINDEPP_H_
+#define MANTID_ALGORITHMS_FINDEPP_H_
+
+#include "MantidAPI/Algorithm.h"
+#include "MantidAPI/ITableWorkspace.h"
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidAlgorithms/DllConfig.h"
+
+namespace Mantid {
+namespace Algorithms {
+
+/** Performs Gaussian fits over each spectrum to find the Elastic Peak
+ Position (EPP).
+
+  Copyright &copy; 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+class MANTID_ALGORITHMS_DLL FindEPP : public API::Algorithm {
+public:
+  const std::string name() const override;
+  int version() const override;
+  const std::string category() const override;
+  const std::string summary() const override;
+
+private:
+  void init() override;
+  void exec() override;
+  void fitGaussian(int64_t);
+  void initWorkspace();
+
+  Mantid::API::MatrixWorkspace_sptr m_inWS;
+  Mantid::API::ITableWorkspace_sptr m_outWS;
+  std::unique_ptr<Mantid::API::Progress> m_progress;
+};
+
+} // namespace Algorithms
+} // namespace Mantid
+
+#endif /* MANTID_ALGORITHMS_FINDEPP_H_ */
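A hypothetical usage sketch, not part of this patch, assuming the usual AlgorithmManager pattern; the property names are those declared in FindEPP.cpp below, and the workspace names are placeholders that would need to exist in the analysis data service.

#include "MantidAPI/AlgorithmManager.h"

// Sketch only: workspace names are placeholders.
auto findEPP = Mantid::API::AlgorithmManager::Instance().create("FindEPP");
findEPP->setPropertyValue("InputWorkspace", "sample_ws");
findEPP->setPropertyValue("OutputWorkspace", "epp_table");
findEPP->execute();
// The output table has one row per spectrum: WorkspaceIndex, PeakCentre,
// PeakCentreError, Sigma, SigmaError, Height, HeightError, chiSq, FitStatus.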
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryReductionOne2.h b/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryReductionOne2.h
index 34a4d1461b09fb6c5315cb9bd7b088c87f966838..4ac333833b4e591184f0fc62006b08802284fa2d 100644
--- a/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryReductionOne2.h
+++ b/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryReductionOne2.h
@@ -4,6 +4,15 @@
 #include "MantidAlgorithms/ReflectometryWorkflowBase2.h"
 
 namespace Mantid {
+// Forward declaration
+namespace API {
+class SpectrumInfo;
+}
+namespace HistogramData {
+class HistogramX;
+class HistogramY;
+class HistogramE;
+}
 namespace Algorithms {
 
 /** ReflectometryReductionOne2 : Reflectometry reduction of a single input TOF
@@ -59,15 +68,86 @@ private:
   // Create a direct beam workspace from input workspace in wavelength
   Mantid::API::MatrixWorkspace_sptr
   makeDirectBeamWS(Mantid::API::MatrixWorkspace_sptr inputWS);
+  // Performs direct beam correction
+  Mantid::API::MatrixWorkspace_sptr
+  directBeamCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS);
+  // Performs transmission or algorithm correction
+  Mantid::API::MatrixWorkspace_sptr
+  transOrAlgCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS,
+                       const bool detectorWSReduced);
   // Performs transmission corrections
   Mantid::API::MatrixWorkspace_sptr
-  transmissionCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS);
+  transmissionCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS,
+                         const bool detectorWSReduced);
   // Performs transmission corrections using alternative correction algorithms
   Mantid::API::MatrixWorkspace_sptr
   algorithmicCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS);
+  // Performs monitor corrections
+  Mantid::API::MatrixWorkspace_sptr
+  monitorCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS);
   // convert to momentum transfer
   Mantid::API::MatrixWorkspace_sptr
   convertToQ(Mantid::API::MatrixWorkspace_sptr inputWS);
+  // Create the output workspace in wavelength
+  Mantid::API::MatrixWorkspace_sptr makeIvsLam();
+  // Do the reduction by summation in Q
+  Mantid::API::MatrixWorkspace_sptr
+  sumInQ(API::MatrixWorkspace_sptr detectorWS);
+  // Do the summation in Q for a single input value
+  void sumInQProcessValue(const int inputIdx, const double twoTheta,
+                          const double bTwoTheta,
+                          const HistogramData::HistogramX &inputX,
+                          const HistogramData::HistogramY &inputY,
+                          const HistogramData::HistogramE &inputE,
+                          const std::vector<size_t> &detectors,
+                          const size_t outSpecIdx,
+                          API::MatrixWorkspace_sptr IvsLam,
+                          std::vector<double> &outputE);
+  // Share counts to a projected value for summation in Q
+  void sumInQShareCounts(const double inputCounts, const double inputErr,
+                         const double bLambda, const double lambdaMin,
+                         const double lambdaMax, const size_t outSpecIdx,
+                         API::MatrixWorkspace_sptr IvsLam,
+                         std::vector<double> &outputE);
+  // Find the lambda range for the output workspace
+  void findIvsLamRange(API::MatrixWorkspace_sptr detectorWS,
+                       const std::vector<size_t> &detectors, double &xMin,
+                       double &xMax);
+  // Construct the output workspace
+  Mantid::API::MatrixWorkspace_sptr
+  constructIvsLamWS(API::MatrixWorkspace_sptr detectorWS);
+  // Whether summation should be done in Q or the default lambda
+  bool summingInQ();
+  // Get projected coordinates onto twoThetaR
+  void getProjectedLambdaRange(const double lambda, const double twoTheta,
+                               const double bLambda, const double bTwoTheta,
+                               const std::vector<size_t> &detectors,
+                               double &lambdaTop, double &lambdaBot);
+  // Check whether two spectrum maps match
+  void verifySpectrumMaps(API::MatrixWorkspace_const_sptr ws1,
+                          API::MatrixWorkspace_const_sptr ws2,
+                          const bool severe);
+
+  // Find and cache constants
+  void findDetectorGroups();
+  void findTheta0();
+  // Accessors for detectors and theta and lambda values
+  const std::vector<std::vector<size_t>> &detectorGroups() const {
+    return m_detectorGroups;
+  }
+  double theta0() { return m_theta0; }
+  double twoThetaR(const std::vector<size_t> &detectors);
+  size_t twoThetaRDetectorIdx(const std::vector<size_t> &detectors);
+
+  API::MatrixWorkspace_sptr m_runWS;
+  const API::SpectrumInfo *m_spectrumInfo;
+  bool m_convertUnits;          // convert the input workspace to lambda
+  bool m_normaliseMonitors;     // normalise by monitors and direct beam
+  bool m_normaliseTransmission; // transmission or algorithmic correction
+  bool m_sum;                   // whether to do summation
+  double m_theta0;              // horizon angle
+  // groups of spectrum indices of the detectors of interest
+  std::vector<std::vector<size_t>> m_detectorGroups;
 };
 
 } // namespace Algorithms
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryWorkflowBase2.h b/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryWorkflowBase2.h
index 2556f1b72978f9ab65ddffe090bfc106c34daf54..a1823aa32a0fa0ee94f05dc38a1477a6fbe8a5e5 100644
--- a/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryWorkflowBase2.h
+++ b/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryWorkflowBase2.h
@@ -35,6 +35,8 @@ namespace Algorithms {
 class DLLExport ReflectometryWorkflowBase2
     : public API::DataProcessorAlgorithm {
 protected:
+  /// Initialize reduction-type properties
+  void initReductionProperties();
   /// Initialize monitor properties
   void initMonitorProperties();
   /// Initialize direct beam properties
@@ -47,6 +49,8 @@ protected:
   void initAlgorithmicProperties(bool autodetect = false);
   /// Initialize momentum transfer properties
   void initMomentumTransferProperties();
+  /// Validate reduction-type properties
+  std::map<std::string, std::string> validateReductionProperties() const;
   /// Validate direct beam properties
   std::map<std::string, std::string> validateDirectBeamProperties() const;
   /// Validate transmission properties
@@ -61,7 +65,8 @@ protected:
   cropWavelength(Mantid::API::MatrixWorkspace_sptr inputWS);
   // Create a detector workspace from input workspace in wavelength
   Mantid::API::MatrixWorkspace_sptr
-  makeDetectorWS(Mantid::API::MatrixWorkspace_sptr inputWS);
+  makeDetectorWS(Mantid::API::MatrixWorkspace_sptr inputWS,
+                 const bool convert = true);
   // Create a monitor workspace from input workspace in wavelength
   Mantid::API::MatrixWorkspace_sptr
   makeMonitorWS(Mantid::API::MatrixWorkspace_sptr inputWS,
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/SpatialGrouping.h b/Framework/Algorithms/inc/MantidAlgorithms/SpatialGrouping.h
index a435ab9ec6e035d55ba9fde4d43b0bcfa232e5dc..3171b7d784d40c6246a05de736583e709108b42f 100644
--- a/Framework/Algorithms/inc/MantidAlgorithms/SpatialGrouping.h
+++ b/Framework/Algorithms/inc/MantidAlgorithms/SpatialGrouping.h
@@ -2,7 +2,7 @@
 #define MANTID_ALGORITHMS_SPATIAL_GROUPING_H_
 
 #include "MantidAPI/Algorithm.h"
-#include "MantidAPI/NearestNeighbourInfo.h"
+#include "MantidAPI/WorkspaceNearestNeighbourInfo.h"
 #include "MantidGeometry/IDTypes.h"
 
 namespace Mantid {
@@ -91,7 +91,7 @@ private:
   std::vector<std::vector<int>> m_groups;
 
   /// NearestNeighbourInfo used by expandNet()
-  std::unique_ptr<API::NearestNeighbourInfo> m_neighbourInfo;
+  std::unique_ptr<API::WorkspaceNearestNeighbourInfo> m_neighbourInfo;
 };
 
 } // namespace Algorithms
diff --git a/Framework/Algorithms/src/AnnularRingAbsorption.cpp b/Framework/Algorithms/src/AnnularRingAbsorption.cpp
index ae6300113a296876ba2f4b0f5094eb3f0c0bed78..8715fb3127e2df0cd07fe7a6d50eb0d01c0f8b6d 100644
--- a/Framework/Algorithms/src/AnnularRingAbsorption.cpp
+++ b/Framework/Algorithms/src/AnnularRingAbsorption.cpp
@@ -178,13 +178,15 @@ AnnularRingAbsorption::createSampleShapeXML(const V3D &upAxis) const {
   const double lowRadiusMtr = (wallMidPtCM - 0.5 * sampleThickCM) / 100.;
   const double uppRadiusMtr = (wallMidPtCM + 0.5 * sampleThickCM) / 100.;
 
-  // Cylinders oriented along Y, with origin at centre of bottom base
+  // Cylinders oriented along Y, with origin at the centre as expected by
+  // the MonteCarloAbsorption algorithm.
+  const V3D bottomCentre{0.0, -sampleHeightCM / 2.0 / 100.0, 0.0}; // in metres.
   const std::string innerCylID = std::string("inner-cyl");
-  const std::string innerCyl = cylinderXML(innerCylID, V3D(), lowRadiusMtr,
-                                           upAxis, sampleHeightCM / 100.0);
+  const std::string innerCyl = cylinderXML(
+      innerCylID, bottomCentre, lowRadiusMtr, upAxis, sampleHeightCM / 100.0);
   const std::string outerCylID = std::string("outer-cyl");
-  const std::string outerCyl = cylinderXML(outerCylID, V3D(), uppRadiusMtr,
-                                           upAxis, sampleHeightCM / 100.0);
+  const std::string outerCyl = cylinderXML(
+      outerCylID, bottomCentre, uppRadiusMtr, upAxis, sampleHeightCM / 100.0);
 
   // Combine shapes
   boost::format algebra("<algebra val=\"(%1% (# %2%))\" />");
diff --git a/Framework/Algorithms/src/FindEPP.cpp b/Framework/Algorithms/src/FindEPP.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..628913042603d63cf7ed8da9a500ab8ba32a5006
--- /dev/null
+++ b/Framework/Algorithms/src/FindEPP.cpp
@@ -0,0 +1,208 @@
+#include "MantidAlgorithms/FindEPP.h"
+#include "MantidAPI/TableRow.h"
+#include "MantidAPI/WorkspaceFactory.h"
+#include "MantidKernel/make_unique.h"
+
+#include <cmath>
+#include <sstream>
+
+namespace Mantid {
+namespace Algorithms {
+
+using namespace Mantid::Kernel;
+using namespace Mantid::API;
+
+// Register the algorithm into the AlgorithmFactory
+DECLARE_ALGORITHM(FindEPP)
+
+//----------------------------------------------------------------------------------------------
+
+/// Algorithm's name for identification. @see Algorithm::name
+const std::string FindEPP::name() const { return "FindEPP"; }
+
+/// Algorithm's version for identification. @see Algorithm::version
+int FindEPP::version() const { return 2; }
+
+/// Algorithm's category for identification. @see Algorithm::category
+const std::string FindEPP::category() const {
+  return "Workflow\\MLZ\\TOFTOF;Utility";
+}
+
+/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary
+const std::string FindEPP::summary() const {
+  return "Performs Gaussian fits over each spectrum to find the Elastic Peak "
+         "Position (EPP).";
+}
+
+//----------------------------------------------------------------------------------------------
+/** Initialize the algorithm's properties.
+ */
+void FindEPP::init() {
+  declareProperty(Kernel::make_unique<WorkspaceProperty<API::MatrixWorkspace>>(
+                      "InputWorkspace", "", Direction::Input),
+                  "An input workspace.");
+  declareProperty(Kernel::make_unique<WorkspaceProperty<API::ITableWorkspace>>(
+                      "OutputWorkspace", "", Direction::Output),
+                  "An output workspace.");
+}
+
+//----------------------------------------------------------------------------------------------
+/** Execute the algorithm.
+ */
+void FindEPP::exec() {
+  m_inWS = getProperty("InputWorkspace");
+
+  initWorkspace();
+
+  int64_t numberspectra = static_cast<int64_t>(m_inWS->getNumberHistograms());
+
+  // Loop over spectra
+  PARALLEL_FOR_IF(threadSafe(*m_inWS, *m_outWS))
+  for (int64_t index = 0; index < numberspectra; ++index) {
+    PARALLEL_START_INTERUPT_REGION
+    fitGaussian(index);
+    PARALLEL_END_INTERUPT_REGION
+  }
+  PARALLEL_CHECK_INTERUPT_REGION
+
+  setProperty("OutputWorkspace", m_outWS);
+}
+
+/** Calls Fit as a child algorithm for each spectrum
+ * @param index : the workspace index
+ */
+void FindEPP::fitGaussian(int64_t index) {
+  size_t spectrum = static_cast<size_t>(index);
+  m_outWS->cell<int>(spectrum, 0) = static_cast<int>(spectrum);
+
+  const auto &x = m_inWS->x(spectrum).rawData();
+  const auto &y = m_inWS->y(spectrum).rawData();
+  const auto &e = m_inWS->e(spectrum).rawData();
+
+  // Find the maximum value and its index
+  const auto maxIt = std::max_element(y.begin(), y.end());
+  const double height = *maxIt;
+  size_t maxIndex = static_cast<size_t>(std::distance(y.begin(), maxIt));
+
+  if (height > 0) {
+    // Find how many bins around the maximum are above half-maximum.
+    // Initialize the distances of the half-maximum bins from the maximum.
+    size_t leftHalf = maxIndex, rightHalf = x.size() - maxIndex - 1;
+
+    // Find the first bin on the right side of the maximum that drops below
+    // half-maximum
+    for (auto it = maxIt; it != y.end(); ++it) {
+      if (*it < 0.5 * height) {
+        rightHalf = it - maxIt - 1;
+        break;
+      }
+    }
+
+    // Find the first bin on the left side of the maximum that drops below
+    // half-maximum
+    for (auto it = maxIt; it != y.begin(); --it) {
+      if (*it < 0.5 * height) {
+        leftHalf = maxIt - it - 1;
+        break;
+      }
+    }
+    g_log.debug() << "Peak in spectrum #" << spectrum
+                  << " has last bins above 0.5*max at " << leftHalf << "\t"
+                  << rightHalf << "\n";
+
+    // We want to fit only if there are at least 3 bins (including the maximum
+    // itself) above half-maximum
+    if (rightHalf + leftHalf >= 2) {
+
+      // Prepare the initial parameters for the fit
+      double fwhm = x[maxIndex + rightHalf] - x[maxIndex - leftHalf];
+      double sigma = fwhm / (2. * sqrt(2. * log(2.)));
+      double center = x[maxIndex];
+      double start = center - 3. * fwhm;
+      double end = center + 3. * fwhm;
+
+      std::stringstream function;
+      function << "name=Gaussian,PeakCentre=";
+      function << center << ",Height=" << height << ",Sigma=" << sigma;
+
+      g_log.debug() << "Fitting spectrum #" << spectrum
+                    << " with: " << function.str() << "\n";
+
+      IAlgorithm_sptr fitAlg = createChildAlgorithm("Fit", 0., 0., false);
+      fitAlg->setProperty("Function", function.str());
+      fitAlg->setProperty("InputWorkspace", m_inWS);
+      fitAlg->setProperty("WorkspaceIndex", static_cast<int>(spectrum));
+      fitAlg->setProperty("StartX", start);
+      fitAlg->setProperty("EndX", end);
+      fitAlg->setProperty("CreateOutput", true);
+      fitAlg->setProperty("OutputParametersOnly", true);
+      fitAlg->executeAsChildAlg();
+
+      const std::string status = fitAlg->getProperty("OutputStatus");
+      ITableWorkspace_sptr fitResult = fitAlg->getProperty("OutputParameters");
+
+      if (status == "success") {
+        m_outWS->cell<double>(spectrum, 1) = fitResult->cell<double>(1, 1);
+        m_outWS->cell<double>(spectrum, 2) = fitResult->cell<double>(1, 2);
+        m_outWS->cell<double>(spectrum, 3) = fitResult->cell<double>(2, 1);
+        m_outWS->cell<double>(spectrum, 4) = fitResult->cell<double>(2, 2);
+        m_outWS->cell<double>(spectrum, 5) = fitResult->cell<double>(0, 1);
+        m_outWS->cell<double>(spectrum, 6) = fitResult->cell<double>(0, 2);
+        m_outWS->cell<double>(spectrum, 7) = fitResult->cell<double>(3, 1);
+        m_outWS->cell<std::string>(spectrum, 8) = status;
+      } else {
+        g_log.debug() << "Fit failed in spectrum #" << spectrum
+                      << ". \nReason :" << status
+                      << ". \nSetting the maximum.\n";
+        m_outWS->cell<std::string>(spectrum, 8) = "fitFailed";
+        m_outWS->cell<double>(spectrum, 1) = x[maxIndex];
+        m_outWS->cell<double>(spectrum, 2) = 0.;
+        m_outWS->cell<double>(spectrum, 5) = height;
+        m_outWS->cell<double>(spectrum, 6) = e[maxIndex];
+      }
+
+    } else {
+      g_log.information() << "Found fewer than 3 bins above half-maximum in "
+                          << "spectrum #" << index << ". Not fitting.\n";
+      m_outWS->cell<std::string>(spectrum, 8) = "narrowPeak";
+      m_outWS->cell<double>(spectrum, 1) = x[maxIndex];
+      m_outWS->cell<double>(spectrum, 2) = 0.;
+      m_outWS->cell<double>(spectrum, 5) = height;
+      m_outWS->cell<double>(spectrum, 6) = e[maxIndex];
+    }
+  } else {
+    g_log.notice() << "Non-positive maximum in spectrum #" << spectrum
+                   << ". Skipping.\n";
+    m_outWS->cell<std::string>(spectrum, 8) = "negativeMaximum";
+  }
+  m_progress->report();
+}
+
+/**
+ * Initializes the output workspace
+ */
+void FindEPP::initWorkspace() {
+
+  m_outWS = WorkspaceFactory::Instance().createTable("TableWorkspace");
+
+  const std::vector<std::string> columns = {
+      "PeakCentre", "PeakCentreError", "Sigma", "SigmaError",
+      "Height",     "HeightError",     "chiSq"};
+
+  m_outWS->addColumn("int", "WorkspaceIndex");
+  m_outWS->getColumn(0)->setPlotType(1);
+  for (const auto &column : columns) {
+    m_outWS->addColumn("double", column);
+  }
+  m_outWS->addColumn("str", "FitStatus");
+
+  const size_t numberSpectra = m_inWS->getNumberHistograms();
+  m_progress = make_unique<Progress>(this, 0, 1, numberSpectra);
+
+  for (size_t i = 0; i < numberSpectra; ++i) {
+    m_outWS->appendRow();
+  }
+}
+
+} // namespace Algorithms
+} // namespace Mantid
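The starting parameters for the Gaussian fit above come from a simple half-maximum width estimate; restated as a standalone sketch (plain C++, no Mantid types) for reference:

#include <cmath>

// Sigma for the initial Gaussian guess follows from FWHM = 2*sqrt(2*ln2)*sigma,
// with the FWHM estimated from the first bins left/right of the maximum that
// drop below half of the peak height (as in FindEPP::fitGaussian above).
double sigmaFromFwhm(const double fwhm) {
  return fwhm / (2.0 * std::sqrt(2.0 * std::log(2.0)));
}

// The fit window used above is the peak centre +/- 3*FWHM.
void fitWindow(const double centre, const double fwhm, double &start,
               double &end) {
  start = centre - 3.0 * fwhm;
  end = centre + 3.0 * fwhm;
}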
diff --git a/Framework/Algorithms/src/ReflectometryReductionOne2.cpp b/Framework/Algorithms/src/ReflectometryReductionOne2.cpp
index 714701a0695069935f85ef1f0d1b91bd9ac3bbdc..b4ad895419486aa55c6c6d8f13e99022076d1cc6 100644
--- a/Framework/Algorithms/src/ReflectometryReductionOne2.cpp
+++ b/Framework/Algorithms/src/ReflectometryReductionOne2.cpp
@@ -1,11 +1,21 @@
 #include "MantidAlgorithms/ReflectometryReductionOne2.h"
 #include "MantidAPI/Axis.h"
+#include "MantidAPI/SpectrumInfo.h"
 #include "MantidAPI/MatrixWorkspace.h"
+#include "MantidAPI/WorkspaceFactory.h"
+#include "MantidHistogramData/LinearGenerator.h"
+#include "MantidIndexing/IndexInfo.h"
 #include "MantidKernel/MandatoryValidator.h"
+#include "MantidKernel/StringTokenizer.h"
 #include "MantidKernel/Unit.h"
 
+#include <algorithm>
+#include <boost/lexical_cast.hpp>
+
 using namespace Mantid::Kernel;
 using namespace Mantid::API;
+using namespace Mantid::HistogramData;
+using namespace Mantid::Indexing;
 
 namespace Mantid {
 namespace Algorithms {
@@ -13,6 +23,230 @@ namespace Algorithms {
 /*Anonomous namespace */
 namespace {
 
+/** Get the twoTheta angle for the centre of the detector associated with the
+* given spectrum
+*
+* @param spectrumInfo : the spectrum info
+* @param spectrumIdx : the workspace index of the spectrum
+* @return : the twoTheta angle in radians
+*/
+double getDetectorTwoTheta(const SpectrumInfo *spectrumInfo,
+                           const size_t spectrumIdx) {
+  return spectrumInfo->signedTwoTheta(spectrumIdx);
+}
+
+/** Get the twoTheta angle range for the top/bottom of the detector associated
+* with the given spectrum
+*
+* @param spectrumInfo : the spectrum info
+* @param spectrumIdx : the workspace index of the spectrum
+* @return : the twoTheta angle range (width) in radians
+*/
+double getDetectorTwoThetaRange(const SpectrumInfo *spectrumInfo,
+                                const size_t spectrumIdx) {
+  // Assume the range covered by this pixel is the difference between this
+  // pixel's twoTheta and that of the next pixel
+  double twoTheta = getDetectorTwoTheta(spectrumInfo, spectrumIdx);
+  double bTwoTheta = 0;
+
+  if (spectrumIdx + 1 < spectrumInfo->size()) {
+    bTwoTheta = getDetectorTwoTheta(spectrumInfo, spectrumIdx + 1) - twoTheta;
+  }
+
+  return bTwoTheta;
+}
+
+/** Get the width of the lambda bin starting at the given index into the
+* X values of the given spectrum
+*
+* @return : the lambda bin width
+*/
+double getLambdaRange(const HistogramX &xValues, const int xIdx) {
+  // The lambda range is the bin width from the given index to the next.
+  if (xIdx < 0 || xIdx + 1 >= static_cast<int>(xValues.size())) {
+    throw std::runtime_error("Error accessing X values out of range (index=" +
+                             std::to_string(xIdx + 1) + ", size=" +
+                             std::to_string(xValues.size()) + ")");
+  }
+
+  double result = xValues[xIdx + 1] - xValues[xIdx];
+  return result;
+}
+
+/** Get the lambda value at the centre of the bin at the given index into
+* the X values of the given spectrum
+*
+* @return : the lambda value at the bin centre
+*/
+double getLambda(const HistogramX &xValues, const int xIdx) {
+  if (xIdx < 0 || xIdx >= static_cast<int>(xValues.size())) {
+    throw std::runtime_error("Error accessing X values out of range (index=" +
+                             std::to_string(xIdx) + ", size=" +
+                             std::to_string(xValues.size()) + ")");
+  }
+
+  // The centre of the bin is the lower bin edge plus half the width
+  return xValues[xIdx] + getLambdaRange(xValues, xIdx) / 2.0;
+}
+
+/** @todo The following translation functions are duplicates of code in
+* GroupDetectors2.cpp. Longer term, we should move them to a common location if
+* possible */
+
+/* The following functions are used to translate single operators into
+* groups, just like the ones this algorithm loads from .map files.
+*
+* Each function takes a string, such as "3+4", or "6:10" and then adds
+* the resulting groups of spectra to outGroups.
+*/
+
+// An add operation, i.e. "3+4" -> [3+4]
+void translateAdd(const std::string &instructions,
+                  std::vector<std::vector<size_t>> &outGroups) {
+  auto spectra = Kernel::StringTokenizer(
+      instructions, "+", Kernel::StringTokenizer::TOK_TRIM |
+                             Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
+
+  std::vector<size_t> outSpectra;
+  outSpectra.reserve(spectra.count());
+  for (auto spectrum : spectra) {
+    // add this spectrum to the group we're about to add
+    outSpectra.push_back(boost::lexical_cast<size_t>(spectrum));
+  }
+  outGroups.push_back(std::move(outSpectra));
+}
+
+// A range summation, i.e. "3-6" -> [3+4+5+6]
+void translateSumRange(const std::string &instructions,
+                       std::vector<std::vector<size_t>> &outGroups) {
+  // add a group with the sum of the spectra in the range
+  auto spectra = Kernel::StringTokenizer(instructions, "-");
+  if (spectra.count() != 2)
+    throw std::runtime_error("Malformed range (-) operation.");
+  // fetch the start and stop spectra
+  size_t first = boost::lexical_cast<size_t>(spectra[0]);
+  size_t last = boost::lexical_cast<size_t>(spectra[1]);
+  // swap if they're back to front
+  if (first > last)
+    std::swap(first, last);
+
+  // add all the spectra in the range to the output group
+  std::vector<size_t> outSpectra;
+  outSpectra.reserve(last - first + 1);
+  for (size_t i = first; i <= last; ++i)
+    outSpectra.push_back(i);
+  if (!outSpectra.empty())
+    outGroups.push_back(std::move(outSpectra));
+}
+
+// A range insertion, i.e. "3:6" -> [3,4,5,6]
+void translateRange(const std::string &instructions,
+                    std::vector<std::vector<size_t>> &outGroups) {
+  // add a group per spectra
+  auto spectra = Kernel::StringTokenizer(
+      instructions, ":", Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
+  if (spectra.count() != 2)
+    throw std::runtime_error("Malformed range (:) operation.");
+  // fetch the start and stop spectra
+  size_t first = boost::lexical_cast<size_t>(spectra[0]);
+  size_t last = boost::lexical_cast<size_t>(spectra[1]);
+  // swap if they're back to front
+  if (first > last)
+    std::swap(first, last);
+
+  // add all the spectra in the range to separate output groups
+  for (size_t i = first; i <= last; ++i) {
+    // create group of size 1 with the spectrum and add it to output
+    outGroups.emplace_back(1, i);
+  }
+}
+
+/**
+* Translate the processing instructions into a vector of groups of indices
+*
+* @param instructions : Instructions to translate
+* @return : A vector of groups, each group being a vector of its 0-based
+* spectrum indices
+*/
+std::vector<std::vector<size_t>>
+translateInstructions(const std::string &instructions) {
+  std::vector<std::vector<size_t>> outGroups;
+
+  try {
+    // split into comma separated groups, each group potentially containing
+    // an operation (+-:) that produces even more groups.
+    auto groups = Kernel::StringTokenizer(
+        instructions, ",",
+        StringTokenizer::TOK_TRIM | StringTokenizer::TOK_IGNORE_EMPTY);
+    for (const auto &groupStr : groups) {
+      // Look for the various operators in the string. If one is found then
+      // do the necessary translation into groupings.
+      if (groupStr.find('+') != std::string::npos) {
+        // add a group with the given spectra
+        translateAdd(groupStr, outGroups);
+      } else if (groupStr.find('-') != std::string::npos) {
+        translateSumRange(groupStr, outGroups);
+      } else if (groupStr.find(':') != std::string::npos) {
+        translateRange(groupStr, outGroups);
+      } else if (!groupStr.empty()) {
+        // contains no instructions, just add this spectrum as a new group
+        // create group of size 1 with the spectrum in it and add it to output
+        outGroups.emplace_back(1, boost::lexical_cast<size_t>(groupStr));
+      }
+    }
+  } catch (boost::bad_lexical_cast &) {
+    throw std::runtime_error("Invalid processing instructions: " +
+                             instructions);
+  }
+
+  return outGroups;
+}
+
+/**
+* Map a spectrum index from the original workspace to the destination workspace
+* @param originWS : the original workspace
+* @param originIdx : the index in the original workspace
+* @param destWS : the destination workspace
+* @return : the index in the destination workspace
+*/
+size_t mapSpectrumIndexToWorkspace(MatrixWorkspace_const_sptr originWS,
+                                   const size_t originIdx,
+                                   MatrixWorkspace_const_sptr destWS) {
+
+  SpectrumNumber specId = originWS->indexInfo().spectrumNumber(originIdx);
+  size_t wsIdx =
+      destWS->getIndexFromSpectrumNumber(static_cast<specnum_t>(specId));
+  return wsIdx;
+}
+
+/**
+* @param originWS : Origin workspace, which provides the original workspace
+* index to spectrum number mapping.
+* @param hostWS : Workspace onto which the resulting workspace indexes will be
+* hosted
+* @param detectorGroups : groups of workspace indices in the origin workspace
+* @throws :: If the spectrum numbers are not found on the host workspace.
+* @return :: Remapped workspace indexes applicable for the host workspace,
+* as a vector of groups of workspace indices
+*/
+std::vector<std::vector<size_t>> mapSpectrumIndicesToWorkspace(
+    MatrixWorkspace_const_sptr originWS, MatrixWorkspace_const_sptr hostWS,
+    const std::vector<std::vector<size_t>> &detectorGroups) {
+
+  std::vector<std::vector<size_t>> hostGroups;
+
+  for (const auto &group : detectorGroups) {
+    std::vector<size_t> hostDetectors;
+    for (auto i : group) {
+      const size_t hostIdx = mapSpectrumIndexToWorkspace(originWS, i, hostWS);
+      hostDetectors.push_back(hostIdx);
+    }
+    hostGroups.push_back(hostDetectors);
+  }
+
+  return hostGroups;
+}
+
 /**
 * Translate all the workspace indexes in an origin workspace into workspace
 * indexes of a host end-point workspace. This is done using spectrum numbers as
@@ -24,43 +258,73 @@ namespace {
 * hosted
 * @throws :: If the specId are not found to exist on the host end-point
 *workspace.
-* @return :: Remapped workspace indexes applicable for the host workspace.
-*results
+* @return :: Remapped workspace indexes applicable for the host workspace,
 *as comma separated string.
 */
-std::string
-createProcessingCommandsFromDetectorWS(MatrixWorkspace_const_sptr originWS,
-                                       MatrixWorkspace_const_sptr hostWS) {
-  auto spectrumMap = originWS->getSpectrumToWorkspaceIndexMap();
-  auto it = spectrumMap.begin();
-  std::stringstream result;
-  specnum_t specId = (*it).first;
-  result << static_cast<int>(hostWS->getIndexFromSpectrumNumber(specId));
-  ++it;
-  for (; it != spectrumMap.end(); ++it) {
-    specId = (*it).first;
-    result << ","
-           << static_cast<int>(hostWS->getIndexFromSpectrumNumber(specId));
-  }
-  return result.str();
-}
+std::string createProcessingCommandsFromDetectorWS(
+    MatrixWorkspace_const_sptr originWS, MatrixWorkspace_const_sptr hostWS,
+    const std::vector<std::vector<size_t>> &detectorGroups) {
 
-/**
-@param ws1 : First workspace to compare
-@param ws2 : Second workspace to compare against
-@param severe: True to indicate that failure to verify should result in an
-exception. Otherwise a warning is generated.
-@return : true if spectrum maps match. False otherwise
-*/
-bool verifySpectrumMaps(MatrixWorkspace_const_sptr ws1,
-                        MatrixWorkspace_const_sptr ws2) {
-  auto map1 = ws1->getSpectrumToWorkspaceIndexMap();
-  auto map2 = ws2->getSpectrumToWorkspaceIndexMap();
-  if (map1 != map2) {
-    return false;
-  } else {
-    return true;
+  std::string result;
+
+  // Map the original indices to the host workspace
+  std::vector<std::vector<size_t>> hostGroups =
+      mapSpectrumIndicesToWorkspace(originWS, hostWS, detectorGroups);
+
+  // Add each group to the output, separated by ','
+
+  /// @todo Low priority: Add support to separate contiguous groups by ':' to
+  /// avoid having long lists of spectrum indices in the processing
+  /// instructions. This would not make any functional difference but would be
+  /// a cosmetic improvement when you view the history.
+  for (auto groupIt = hostGroups.begin(); groupIt != hostGroups.end();
+       ++groupIt) {
+    const auto &hostDetectors = *groupIt;
+
+    // Add each detector index to the output string separated by '+' to indicate
+    // that all detectors in this group will be summed. We also check for
+    // contiguous ranges so we output e.g. 3-5 instead of 3+4+5
+    bool contiguous = false;
+    size_t contiguousStart = 0;
+
+    for (auto it = hostDetectors.begin(); it != hostDetectors.end(); ++it) {
+      // Check if the next iterator is a contiguous increment from this one
+      auto nextIt = it + 1;
+      if (nextIt != hostDetectors.end() && *nextIt == *it + 1) {
+        // If this is a start of a new contiguous region, remember the start
+        // index
+        if (!contiguous) {
+          contiguousStart = *it;
+          contiguous = true;
+        }
+        // Continue to find the end of the contiguous region
+        continue;
+      }
+
+      if (contiguous) {
+        // Output the contiguous range, then reset the flag
+        result.append(std::to_string(contiguousStart))
+            .append("-")
+            .append(std::to_string(*it));
+        contiguousStart = 0;
+        contiguous = false;
+      } else {
+        // Just output the value
+        result.append(std::to_string(*it));
+      }
+
+      // Add a separator ready for the next value/range
+      if (nextIt != hostDetectors.end()) {
+        result.append("+");
+      }
+    }
+
+    if (groupIt + 1 != hostGroups.end()) {
+      result.append(",");
+    }
   }
+
+  return result;
 }
 }
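For reference, the grouping grammar handled by translateInstructions() above maps e.g. "3+4" to one summed group [3,4], "3-6" to one summed group [3,4,5,6] and "3:6" to four single-spectrum groups, with comma-separated groups kept separate. The inverse step in createProcessingCommandsFromDetectorWS() compresses contiguous indices back into ranges; a standalone sketch of that compression (plain C++, hypothetical helper):

#include <cstddef>
#include <string>
#include <vector>

// Standalone sketch of the contiguous-range compression used above:
// e.g. {3,4,5,8} -> "3-5+8".
std::string compressGroup(const std::vector<std::size_t> &detectors) {
  std::string out;
  for (std::size_t i = 0; i < detectors.size();) {
    // Extend j to the end of the contiguous run starting at i.
    std::size_t j = i;
    while (j + 1 < detectors.size() && detectors[j + 1] == detectors[j] + 1)
      ++j;
    if (j > i)
      out += std::to_string(detectors[i]) + "-" + std::to_string(detectors[j]);
    else
      out += std::to_string(detectors[i]);
    // '+' joins the summed entries within a group.
    if (j + 1 < detectors.size())
      out += "+";
    i = j + 1;
  }
  return out;
}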
 
@@ -77,6 +341,13 @@ void ReflectometryReductionOne2::init() {
                       "InputWorkspace", "", Direction::Input),
                   "Run to reduce.");
 
+  initReductionProperties();
+
+  // ThetaIn
+  declareProperty(make_unique<PropertyWithValue<double>>(
+                      "ThetaIn", Mantid::EMPTY_DBL(), Direction::Input),
+                  "Angle in degrees");
+
   // Processing instructions
   declareProperty(Kernel::make_unique<PropertyWithValue<std::string>>(
                       "ProcessingInstructions", "",
@@ -131,6 +402,9 @@ ReflectometryReductionOne2::validateInputs() {
 
   std::map<std::string, std::string> results;
 
+  const auto reduction = validateReductionProperties();
+  results.insert(reduction.begin(), reduction.end());
+
   const auto wavelength = validateWavelengthRanges();
   results.insert(wavelength.begin(), wavelength.end());
 
@@ -146,63 +420,36 @@ ReflectometryReductionOne2::validateInputs() {
 /** Execute the algorithm.
 */
 void ReflectometryReductionOne2::exec() {
-  MatrixWorkspace_sptr runWS = getProperty("InputWorkspace");
-
-  const auto xUnitID = runWS->getAxis(0)->unit()->unitID();
+  // Get input properties
+  m_runWS = getProperty("InputWorkspace");
+  const auto xUnitID = m_runWS->getAxis(0)->unit()->unitID();
 
   // Neither TOF or Lambda? Abort.
   if ((xUnitID != "Wavelength") && (xUnitID != "TOF"))
     throw std::invalid_argument(
         "InputWorkspace must have units of TOF or Wavelength");
 
-  // Output workspace in wavelength
-  MatrixWorkspace_sptr IvsLam;
+  m_spectrumInfo = &m_runWS->spectrumInfo();
 
-  if (xUnitID == "Wavelength") {
-    IvsLam = runWS;
-  } else {
-    // xUnitID == "TOF"
-
-    // Detector workspace
-    auto detectorWS = makeDetectorWS(runWS);
-
-    // Normalization by direct beam (optional)
-    Property *directBeamProperty = getProperty("RegionOfDirectBeam");
-    if (!directBeamProperty->isDefault()) {
-      const auto directBeam = makeDirectBeamWS(runWS);
-      detectorWS = divide(detectorWS, directBeam);
-    }
+  // Find and cache detector groups and theta0
+  findDetectorGroups();
+  findTheta0();
 
-    // Monitor workspace (only if I0MonitorIndex, MonitorBackgroundWavelengthMin
-    // and MonitorBackgroundWavelengthMax have been given)
-    Property *monProperty = getProperty("I0MonitorIndex");
-    Property *backgroundMinProperty =
-        getProperty("MonitorBackgroundWavelengthMin");
-    Property *backgroundMaxProperty =
-        getProperty("MonitorBackgroundWavelengthMin");
-    if (!monProperty->isDefault() && !backgroundMinProperty->isDefault() &&
-        !backgroundMaxProperty->isDefault()) {
-      const bool integratedMonitors =
-          getProperty("NormalizeByIntegratedMonitors");
-      const auto monitorWS = makeMonitorWS(runWS, integratedMonitors);
-      if (!integratedMonitors)
-        detectorWS = rebinDetectorsToMonitors(detectorWS, monitorWS);
-      IvsLam = divide(detectorWS, monitorWS);
-    } else {
-      IvsLam = detectorWS;
-    }
-
-    // Crop to wavelength limits
-    IvsLam = cropWavelength(IvsLam);
+  // Check whether conversion, normalisation, summation etc. need to be done
+  m_convertUnits = true;
+  m_normaliseMonitors = true;
+  m_normaliseTransmission = true;
+  m_sum = true;
+  if (xUnitID == "Wavelength") {
+    // Already converted to wavelength
+    m_convertUnits = false;
+    // Assume it's also already been normalised by monitors and summed
+    m_normaliseMonitors = false;
+    m_sum = false;
   }
 
-  // Transmission correction
-  MatrixWorkspace_sptr transRun = getProperty("FirstTransmissionRun");
-  if (transRun) {
-    IvsLam = transmissionCorrection(IvsLam);
-  } else if (getPropertyValue("CorrectionAlgorithm") != "None") {
-    IvsLam = algorithmicCorrection(IvsLam);
-  }
+  // Create the output workspace in wavelength
+  MatrixWorkspace_sptr IvsLam = makeIvsLam();
 
   // Convert to Q
   auto IvsQ = convertToQ(IvsLam);
@@ -211,6 +458,88 @@ void ReflectometryReductionOne2::exec() {
   setProperty("OutputWorkspace", IvsQ);
 }
 
+/**
+* Creates the output 1D array in wavelength from an input 2D workspace in
+* TOF. Summation is done over lambda or over lines of constant Q depending on
+* the type of reduction. For the latter, the output is projected to "virtual
+* lambda" at a reference angle twoThetaR.
+*
+* @return :: the output workspace in wavelength
+*/
+MatrixWorkspace_sptr ReflectometryReductionOne2::makeIvsLam() {
+  MatrixWorkspace_sptr result = m_runWS;
+
+  if (summingInQ()) {
+    if (m_convertUnits) {
+      g_log.debug("Converting input workspace to wavelength\n");
+      result = convertToWavelength(result);
+    }
+    if (m_normaliseMonitors) {
+      g_log.debug("Normalising input workspace by monitors\n");
+      result = directBeamCorrection(result);
+      result = monitorCorrection(result);
+    }
+    if (m_normaliseTransmission) {
+      g_log.debug("Normalising input workspace by transmission run\n");
+      result = transOrAlgCorrection(result, false);
+    }
+    if (m_sum) {
+      g_log.debug("Summing in Q\n");
+      result = sumInQ(result);
+    }
+  } else {
+    if (m_sum) {
+      g_log.debug("Summing in wavelength\n");
+      result = makeDetectorWS(result, m_convertUnits);
+    }
+    if (m_normaliseMonitors) {
+      g_log.debug("Normalising output workspace by monitors\n");
+      result = directBeamCorrection(result);
+      result = monitorCorrection(result);
+    }
+    if (m_normaliseTransmission) {
+      g_log.debug("Normalising output workspace by transmission run\n");
+      result = transOrAlgCorrection(result, true);
+    }
+  }
+
+  // Crop to wavelength limits
+  g_log.debug("Cropping output workspace\n");
+  result = cropWavelength(result);
+
+  return result;
+}
+
+/**
+* Normalize by monitors (only if I0MonitorIndex, MonitorBackgroundWavelengthMin
+* and MonitorBackgroundWavelengthMax have been given)
+*
+* @param detectorWS :: the detector workspace to normalise, in lambda
+* @return :: the normalized workspace in lambda
+*/
+MatrixWorkspace_sptr
+ReflectometryReductionOne2::monitorCorrection(MatrixWorkspace_sptr detectorWS) {
+  MatrixWorkspace_sptr IvsLam;
+  Property *monProperty = getProperty("I0MonitorIndex");
+  Property *backgroundMinProperty =
+      getProperty("MonitorBackgroundWavelengthMin");
+  Property *backgroundMaxProperty =
+      getProperty("MonitorBackgroundWavelengthMin");
+  if (!monProperty->isDefault() && !backgroundMinProperty->isDefault() &&
+      !backgroundMaxProperty->isDefault()) {
+    const bool integratedMonitors =
+        getProperty("NormalizeByIntegratedMonitors");
+    const auto monitorWS = makeMonitorWS(m_runWS, integratedMonitors);
+    if (!integratedMonitors)
+      detectorWS = rebinDetectorsToMonitors(detectorWS, monitorWS);
+    IvsLam = divide(detectorWS, monitorWS);
+  } else {
+    IvsLam = detectorWS;
+  }
+
+  return IvsLam;
+}
+
 /** Creates a direct beam workspace in wavelength from an input workspace in
 * TOF. This method should only be called if RegionOfDirectBeam is provided.
 *
@@ -239,30 +568,90 @@ ReflectometryReductionOne2::makeDirectBeamWS(MatrixWorkspace_sptr inputWS) {
   return directBeamWS;
 }
 
+/**
+* Normalize the workspace by the direct beam (optional)
+*
+* @param detectorWS : workspace in wavelength which is to be normalized
+* @return : corrected workspace
+*/
+MatrixWorkspace_sptr ReflectometryReductionOne2::directBeamCorrection(
+    MatrixWorkspace_sptr detectorWS) {
+
+  MatrixWorkspace_sptr normalized = detectorWS;
+  Property *directBeamProperty = getProperty("RegionOfDirectBeam");
+  if (!directBeamProperty->isDefault()) {
+    auto directBeam = makeDirectBeamWS(m_runWS);
+
+    // Rebin the direct beam workspace to be the same as the input.
+    auto rebinToWorkspaceAlg = this->createChildAlgorithm("RebinToWorkspace");
+    rebinToWorkspaceAlg->initialize();
+    rebinToWorkspaceAlg->setProperty("WorkspaceToMatch", detectorWS);
+    rebinToWorkspaceAlg->setProperty("WorkspaceToRebin", directBeam);
+    rebinToWorkspaceAlg->execute();
+    directBeam = rebinToWorkspaceAlg->getProperty("OutputWorkspace");
+
+    normalized = divide(detectorWS, directBeam);
+  }
+
+  return normalized;
+}
+
+/**
+* Perform either transmission or algorithmic correction according to the
+* settings.
+* @param detectorWS : workspace in wavelength which is to be normalized
+* @param detectorWSReduced:: whether the input detector workspace has been
+* reduced
+* @return : corrected workspace
+*/
+MatrixWorkspace_sptr ReflectometryReductionOne2::transOrAlgCorrection(
+    MatrixWorkspace_sptr detectorWS, const bool detectorWSReduced) {
+
+  MatrixWorkspace_sptr normalized;
+  MatrixWorkspace_sptr transRun = getProperty("FirstTransmissionRun");
+  if (transRun) {
+    normalized = transmissionCorrection(detectorWS, detectorWSReduced);
+  } else if (getPropertyValue("CorrectionAlgorithm") != "None") {
+    normalized = algorithmicCorrection(detectorWS);
+  } else {
+    normalized = detectorWS;
+  }
+
+  return normalized;
+}
+
 /** Perform transmission correction by running 'CreateTransmissionWorkspace' on
 * the input workspace
 * @param detectorWS :: the input workspace
+* @param detectorWSReduced:: whether the input detector workspace has been
+* reduced
 * @return :: the input workspace normalized by transmission
 */
 MatrixWorkspace_sptr ReflectometryReductionOne2::transmissionCorrection(
-    MatrixWorkspace_sptr detectorWS) {
+    MatrixWorkspace_sptr detectorWS, const bool detectorWSReduced) {
 
   const bool strictSpectrumChecking = getProperty("StrictSpectrumChecking");
-
   MatrixWorkspace_sptr transmissionWS = getProperty("FirstTransmissionRun");
-  Unit_const_sptr xUnit = transmissionWS->getAxis(0)->unit();
 
+  // Reduce the transmission workspace, if not already done (assume that if
+  // the workspace is in wavelength then it has already been reduced)
+  Unit_const_sptr xUnit = transmissionWS->getAxis(0)->unit();
   if (xUnit->unitID() == "TOF") {
 
-    // Processing instructions for transmission workspace
+    // Processing instructions for transmission workspace. If strict spectrum
+    // checking is not enabled then just use the same processing instructions
+    // that were passed in.
     std::string transmissionCommands = getProperty("ProcessingInstructions");
     if (strictSpectrumChecking) {
-      // If we have strict spectrum checking, the processing commands need to be
-      // made from the
-      // numerator workspace AND the transmission workspace based on matching
-      // spectrum numbers.
-      transmissionCommands =
-          createProcessingCommandsFromDetectorWS(detectorWS, transmissionWS);
+      // If we have strict spectrum checking, we should have the same
+      // spectrum numbers in both workspaces, but not necessarily with the
+      // same workspace indices. Therefore, map the processing instructions
+      // from the original workspace to the correct indices in the
+      // transmission workspace. Note that we use the run workspace here
+      // because the detectorWS may already have been reduced and may not
+      // contain the original spectra.
+      transmissionCommands = createProcessingCommandsFromDetectorWS(
+          m_runWS, transmissionWS, detectorGroups());
     }
 
     MatrixWorkspace_sptr secondTransmissionWS =
@@ -298,15 +687,10 @@ MatrixWorkspace_sptr ReflectometryReductionOne2::transmissionCorrection(
   rebinToWorkspaceAlg->execute();
   transmissionWS = rebinToWorkspaceAlg->getProperty("OutputWorkspace");
 
-  const bool match = verifySpectrumMaps(detectorWS, transmissionWS);
-  if (!match) {
-    const std::string message =
-        "Spectrum maps between workspaces do NOT match up.";
-    if (strictSpectrumChecking) {
-      throw std::invalid_argument(message);
-    } else {
-      g_log.warning(message);
-    }
+  // If the detector workspace has been reduced then the spectrum maps
+  // should match AFTER reducing the transmission workspace
+  if (detectorWSReduced) {
+    verifySpectrumMaps(detectorWS, transmissionWS, strictSpectrumChecking);
   }
 
   MatrixWorkspace_sptr normalized = divide(detectorWS, transmissionWS);
@@ -362,5 +746,464 @@ ReflectometryReductionOne2::convertToQ(MatrixWorkspace_sptr inputWS) {
   return IvsQ;
 }
 
+/**
+* Determine whether the reduction should sum along lines of constant
+* Q or in lambda (the default).
+*
+* @return : true if the reduction should sum in Q; false otherwise
+*/
+bool ReflectometryReductionOne2::summingInQ() {
+  bool result = false;
+  const std::string summationType = getProperty("SummationType");
+
+  if (summationType == "SumInQ") {
+    result = true;
+  }
+
+  return result;
+}
+
+/**
+* Find and cache the indices of the detectors of interest
+*/
+void ReflectometryReductionOne2::findDetectorGroups() {
+  std::string instructions = getPropertyValue("ProcessingInstructions");
+
+  m_detectorGroups = translateInstructions(instructions);
+
+  // Sort the groups by the first spectrum number in the group (to give the same
+  // output order as GroupDetectors)
+  std::sort(m_detectorGroups.begin(), m_detectorGroups.end(),
+            [](const std::vector<size_t> &a, const std::vector<size_t> &b) {
+              return a.front() < b.front();
+            });
+
+  if (m_detectorGroups.empty()) {
+    throw std::runtime_error("Invalid processing instructions");
+  }
+}
+
+/**
+* Find and cache the angle theta0 from which lines of constant Q emanate
+*/
+void ReflectometryReductionOne2::findTheta0() {
+  // Only required if summing in Q
+  if (!summingInQ()) {
+    return;
+  }
+
+  const std::string reductionType = getProperty("ReductionType");
+
+  // For the non-flat sample case theta0 is 0
+  m_theta0 = 0.0;
+
+  if (reductionType == "DivergentBeam") {
+    // theta0 is the horizon angle, which is half the twoTheta angle of the
+    // detector position. This is the angle the detector has been rotated
+    // to, which we can get from ThetaIn
+    Property *thetaIn = getProperty("ThetaIn");
+    if (!thetaIn->isDefault()) {
+      m_theta0 = getProperty("ThetaIn");
+    } else {
+      /// @todo Currently, ThetaIn must be provided via a property. We could
+      /// calculate its value instead using
+      /// ReflectometryReductionOneAuto2::calculateTheta, which could be moved
+      /// to the base class (ReflectometryWorkflowBase2). Users normally use
+      /// ReflectometryReductionOneAuto2 though, so at the moment it isn't a
+      /// high priority to be able to calculate it here.
+      throw std::runtime_error(
+          "The ThetaIn property is required for the DivergentBeam case");
+    }
+  }
+
+  g_log.debug("theta0: " + std::to_string(theta0()) + " degrees\n");
+
+  // Convert to radians
+  m_theta0 *= M_PI / 180.0;
+}
+
+/**
+* Get the (arbitrary) reference angle twoThetaR used for the summation
+* in Q
+*
+* @param detectors : spectrum indices of the detectors of interest
+* @return : the angle twoThetaR in radians
+* @throws : if the angle could not be found
+*/
+double
+ReflectometryReductionOne2::twoThetaR(const std::vector<size_t> &detectors) {
+  return getDetectorTwoTheta(m_spectrumInfo, twoThetaRDetectorIdx(detectors));
+}
+
+/**
+* Get the spectrum index which defines the twoThetaR reference angle
+* @param detectors : spectrum indices of the detectors of interest
+* @return : the spectrum index
+*/
+size_t ReflectometryReductionOne2::twoThetaRDetectorIdx(
+    const std::vector<size_t> &detectors) {
+  // Get the mid-point of the area of interest
+  return detectors.front() + (detectors.back() - detectors.front()) / 2;
+}
+
+/**
+* Find the range of the projected lambda range when summing in Q
+*
+* @param detectorWS [in] : the workspace containing the values to project
+* @param detectors [in] : the workspace indices of the detectors of interest
+* @param xMin [out] : the start of the projected lambda range
+* @param xMax [out] : the end of the projected lambda range
+*/
+void ReflectometryReductionOne2::findIvsLamRange(
+    MatrixWorkspace_sptr detectorWS, const std::vector<size_t> &detectors,
+    double &xMin, double &xMax) {
+
+  // Get the max/min wavelength of region of interest
+  const double lambdaMin = getProperty("WavelengthMin");
+  const double lambdaMax = getProperty("WavelengthMax");
+
+  // Get the new max and min X values of the projected (virtual) lambda range
+  double dummy = 0.0;
+
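+  // The projection (see getProjectedLambdaRange) maps the largest input
+  // wavelength at the lowest twoTheta (assumed to be detectors.front()) to
+  // the top of the virtual-lambda range, and the smallest wavelength at the
+  // highest twoTheta (detectors.back()) to the bottom of the range.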
+  const size_t spIdxMin = detectors.front();
+  const double twoThetaMin = getDetectorTwoTheta(m_spectrumInfo, spIdxMin);
+  const double bTwoThetaMin =
+      getDetectorTwoThetaRange(m_spectrumInfo, spIdxMin);
+  // For bLambda, use the average bin size for this spectrum
+  auto xValues = detectorWS->x(spIdxMin);
+  double bLambda = (xValues[xValues.size() - 1] - xValues[0]) /
+                   static_cast<int>(xValues.size());
+  getProjectedLambdaRange(lambdaMax, twoThetaMin, bLambda, bTwoThetaMin,
+                          detectors, dummy, xMax);
+
+  const size_t spIdxMax = detectors.back();
+  const double twoThetaMax = getDetectorTwoTheta(m_spectrumInfo, spIdxMax);
+  const double bTwoThetaMax =
+      getDetectorTwoThetaRange(m_spectrumInfo, spIdxMax);
+  xValues = detectorWS->x(spIdxMax);
+  bLambda = (xValues[xValues.size() - 1] - xValues[0]) /
+            static_cast<int>(xValues.size());
+  getProjectedLambdaRange(lambdaMin, twoThetaMax, bLambda, bTwoThetaMax,
+                          detectors, xMin, dummy);
+
+  if (xMin > xMax) {
+    throw std::runtime_error(
+        "Error projecting lambda range to reference line at twoTheta=" +
+        std::to_string(twoThetaR(detectors)) + "; projected range (" +
+        std::to_string(xMin) + "," + std::to_string(xMax) + ") is negative.");
+  }
+}
+
+/**
+* Construct an "empty" output workspace in virtual-lambda for summation in Q.
+* The workspace will have the same x values as the input workspace but the y
+* values will all be zero.
+*
+* @return : a 1D workspace where y values are all zero
+*/
+MatrixWorkspace_sptr
+ReflectometryReductionOne2::constructIvsLamWS(MatrixWorkspace_sptr detectorWS) {
+
+  // There is one output spectrum for each detector group
+  MatrixWorkspace_sptr outputWS =
+      WorkspaceFactory::Instance().create(detectorWS, detectorGroups().size());
+
+  const size_t numGroups = detectorGroups().size();
+  const size_t numHist = outputWS->getNumberHistograms();
+  if (numHist != numGroups) {
+    throw std::runtime_error(
+        "Error constructing IvsLam: number of output histograms " +
+        std::to_string(numHist) +
+        " does not equal the number of input detector groups " +
+        std::to_string(numGroups));
+  }
+
+  // Loop through each detector group in the input
+  for (size_t groupIdx = 0; groupIdx < numGroups; ++groupIdx) {
+    // Get the detectors in this group
+    auto &detectors = detectorGroups()[groupIdx];
+
+    // Find the X values. These are the projected lambda values for this
+    // detector group
+    double xMin = 0.0;
+    double xMax = 0.0;
+    findIvsLamRange(detectorWS, detectors, xMin, xMax);
+    // Use the same number of bins as the input
+    const int numBins = static_cast<int>(detectorWS->blocksize());
+    const double binWidth = (xMax - xMin) / (numBins + 1);
+    // Construct the histogram with these X values. Y and E values are zero.
+    const BinEdges xValues(numBins + 1, LinearGenerator(xMin, binWidth));
+    outputWS->setBinEdges(groupIdx, xValues);
+
+    // Set the detector ID from the twoThetaR detector.
+    const size_t twoThetaRIdx = twoThetaRDetectorIdx(detectors);
+    auto &outSpec = outputWS->getSpectrum(groupIdx);
+    const detid_t twoThetaRDetID =
+        m_spectrumInfo->detector(twoThetaRIdx).getID();
+    outSpec.clearDetectorIDs();
+    outSpec.addDetectorID(twoThetaRDetID);
+    // Set the spectrum number from the twoThetaR detector
+    SpectrumNumber specNum =
+        detectorWS->indexInfo().spectrumNumber(twoThetaRIdx);
+    auto indexInf = outputWS->indexInfo();
+    indexInf.setSpectrumNumbers(specNum, specNum);
+    outputWS->setIndexInfo(indexInf);
+  }
+
+  return outputWS;
+}
+
+/**
+* Sum counts from the input workspace in lambda along lines of constant Q by
+* projecting to "virtual lambda" at a reference angle twoThetaR.
+*
+* @param detectorWS [in] :: the input workspace in wavelength
+* @return :: the output workspace in wavelength
+*/
+MatrixWorkspace_sptr
+ReflectometryReductionOne2::sumInQ(MatrixWorkspace_sptr detectorWS) {
+
+  // Construct the output array in virtual lambda
+  MatrixWorkspace_sptr IvsLam = constructIvsLamWS(detectorWS);
+
+  // Loop through each input group (and corresponding output spectrum)
+  const size_t numGroups = detectorGroups().size();
+  for (size_t groupIdx = 0; groupIdx < numGroups; ++groupIdx) {
+    auto &detectors = detectorGroups()[groupIdx];
+    auto &outputE = IvsLam->dataE(groupIdx);
+
+    // Loop through each spectrum in the detector group
+    for (auto spIdx : detectors) {
+      // Get the angle of this detector and its size in twoTheta
+      const double twoTheta = getDetectorTwoTheta(m_spectrumInfo, spIdx);
+      const double bTwoTheta = getDetectorTwoThetaRange(m_spectrumInfo, spIdx);
+
+      // Check X length is Y length + 1
+      const auto &inputX = detectorWS->x(spIdx);
+      const auto &inputY = detectorWS->y(spIdx);
+      const auto &inputE = detectorWS->e(spIdx);
+      if (inputX.size() != inputY.size() + 1) {
+        throw std::runtime_error(
+            "Expected input workspace to be histogram data (got X len=" +
+            std::to_string(inputX.size()) + ", Y len=" +
+            std::to_string(inputY.size()) + ")");
+      }
+
+      // Create a vector for the projected errors for this spectrum.
+      // (Output Y values can simply be accumulated directly into the output
+      // workspace, but for error values we need to create a separate error
+      // vector for the projected errors from each input spectrum and then
+      // do an overall sum in quadrature.)
+      std::vector<double> projectedE(outputE.size(), 0.0);
+
+      // Process each value in the spectrum
+      const int ySize = static_cast<int>(inputY.size());
+      for (int inputIdx = 0; inputIdx < ySize; ++inputIdx) {
+        // Do the summation in Q
+        sumInQProcessValue(inputIdx, twoTheta, bTwoTheta, inputX, inputY,
+                           inputE, detectors, groupIdx, IvsLam, projectedE);
+      }
+
+      // Sum errors in quadrature
+      const int eSize = static_cast<int>(inputE.size());
+      for (int outIdx = 0; outIdx < eSize; ++outIdx) {
+        outputE[outIdx] += projectedE[outIdx] * projectedE[outIdx];
+      }
+    }
+
+    // Take the square root of all the accumulated squared errors for this
+    // detector group. Assumes Gaussian errors
+    std::transform(outputE.begin(), outputE.end(), outputE.begin(),
+                   [](double value) { return std::sqrt(value); });
+  }
+
+  return IvsLam;
+}
+
+/**
+* Share counts from an input value onto the projected output in virtual-lambda
+*
+* @param inputIdx [in] :: the index into the input arrays
+* @param twoTheta [in] :: the value of twoTheta for this spectrum
+* @param bTwoTheta [in] :: the size of the pixel in twoTheta
+* @param inputX [in] :: the input spectrum X values
+* @param inputY [in] :: the input spectrum Y values
+* @param inputE [in] :: the input spectrum E values
+* @param detectors [in] :: spectrum indices of the detectors of interest
+* @param outSpecIdx [in] :: the output spectrum index
+* @param IvsLam [in,out] :: the output workspace
+* @param outputE [in,out] :: the projected E values
+*/
+void ReflectometryReductionOne2::sumInQProcessValue(
+    const int inputIdx, const double twoTheta, const double bTwoTheta,
+    const HistogramX &inputX, const HistogramY &inputY,
+    const HistogramE &inputE, const std::vector<size_t> &detectors,
+    const size_t outSpecIdx, MatrixWorkspace_sptr IvsLam,
+    std::vector<double> &outputE) {
+
+  // Check whether there are any counts (if not, nothing to share)
+  const double inputCounts = inputY[inputIdx];
+  if (inputCounts <= 0.0 || std::isnan(inputCounts) ||
+      std::isinf(inputCounts)) {
+    return;
+  }
+  // Get the bin width and the bin centre
+  const double bLambda = getLambdaRange(inputX, inputIdx);
+  const double lambda = getLambda(inputX, inputIdx);
+  // Project these coordinates onto the virtual-lambda output (at twoThetaR)
+  double lambdaVMin = 0.0;
+  double lambdaVMax = 0.0;
+  getProjectedLambdaRange(lambda, twoTheta, bLambda, bTwoTheta, detectors,
+                          lambdaVMin, lambdaVMax);
+  // Share the input counts into the output array
+  sumInQShareCounts(inputCounts, inputE[inputIdx], bLambda, lambdaVMin,
+                    lambdaVMax, outSpecIdx, IvsLam, outputE);
+}
+
+/**
+ * Share the given input counts into the output array bins proportionally
+ * according to how much the bins overlap the given lambda range.
+ * outputX.size() must equal outputY.size() + 1
+ *
+ * @param inputCounts [in] :: the input counts to share out
+ * @param inputErr [in] :: the input errors to share out
+ * @param bLambda [in] :: the bin width in lambda
+ * @param lambdaMin [in] :: the start of the range to share counts to
+ * @param lambdaMax [in] :: the end of the range to share counts to
+ * @param outSpecIdx [in] :: the spectrum index to be updated in the output
+ * workspace
+ * @param IvsLam [in,out] :: the output workspace
+ * @param outputE [in,out] :: the projected E values
+ */
+void ReflectometryReductionOne2::sumInQShareCounts(
+    const double inputCounts, const double inputErr, const double bLambda,
+    const double lambdaMin, const double lambdaMax, const size_t outSpecIdx,
+    MatrixWorkspace_sptr IvsLam, std::vector<double> &outputE) {
+  // Check that we have histogram data
+  const auto &outputX = IvsLam->dataX(outSpecIdx);
+  auto &outputY = IvsLam->dataY(outSpecIdx);
+  if (outputX.size() != outputY.size() + 1) {
+    throw std::runtime_error(
+        "Expected output array to be histogram data (got X len=" +
+        std::to_string(outputX.size()) + ", Y len=" +
+        std::to_string(outputY.size()) + ")");
+  }
+
+  const double totalWidth = lambdaMax - lambdaMin;
+
+  // Get the first bin edge in the output X array that is within range.
+  // There will probably be some overlap, so start from the bin edge before
+  // this (unless we're already at the first bin edge).
+  auto startIter = std::lower_bound(outputX.begin(), outputX.end(), lambdaMin);
+  if (startIter != outputX.begin()) {
+    --startIter;
+  }
+
+  // Loop through all overlapping output bins. Convert the iterator to an
+  // index because we need to index both the X and Y arrays.
+  const int xSize = static_cast<int>(outputX.size());
+  for (auto outIdx = startIter - outputX.begin(); outIdx < xSize - 1;
+       ++outIdx) {
+    const double binStart = outputX[outIdx];
+    const double binEnd = outputX[outIdx + 1];
+    if (binStart > lambdaMax) {
+      // No longer in the overlap region so we're finished
+      break;
+    }
+    // Add a share of the input counts to this bin based on the proportion of
+    // overlap.
+    const double overlapWidth =
+        std::min({bLambda, lambdaMax - binStart, binEnd - lambdaMin});
+    const double fraction = overlapWidth / totalWidth;
+    outputY[outIdx] += inputCounts * fraction;
+    outputE[outIdx] += inputErr * fraction;
+  }
+}
+
+/**
+* Project an input pixel onto an arbitrary reference line at twoThetaR. The
+* projection is done along lines of constant Q, which emanate from theta0. The
+* top-left and bottom-right corners of the pixel are projected, resulting in an
+* output range in "virtual" lambda (lambdaV).
+*
+* For a description of this projection, see:
+*   R. Cubitt, T. Saerbeck, R.A. Campbell, R. Barker, P. Gutfreund
+*   J. Appl. Crystallogr., 48 (6) (2015)
+*
+* @param lambda [in] :: the lambda coord of the centre of the pixel to project
+* @param twoTheta [in] :: the twoTheta coord of the centre of the pixel to
+* project
+* @param bLambda [in] :: the pixel size in lambda
+* @param bTwoTheta [in] :: the pixel size in twoTheta
+* @param detectors [in] :: spectrum indices of the detectors of interest
+* @param lambdaVMin [out] :: the projected range start
+* @param lambdaVMax [out] :: the projected range end
+*/
+void ReflectometryReductionOne2::getProjectedLambdaRange(
+    const double lambda, const double twoTheta, const double bLambda,
+    const double bTwoTheta, const std::vector<size_t> &detectors,
+    double &lambdaVMin, double &lambdaVMax) {
+
+  // Get the angle from twoThetaR to this detector
+  const double twoThetaRVal = twoThetaR(detectors);
+  // Get the distance from the pixel to twoThetaR
+  const double gamma = twoTheta - twoThetaRVal;
+  // Get the angle from the horizon to the reference angle
+  const double horizonThetaR = twoThetaRVal - theta0();
+
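+  // Each projected corner (lambda +/- bLambda/2 paired with
+  // twoTheta -/+ bTwoTheta/2) maps to
+  //   lambdaV = lambda_corner * sin(twoThetaR - theta0) / sin(twoTheta_corner - theta0)
+  // i.e. the wavelength that gives the same Q at the reference angle twoThetaR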
+  // Calculate the projected wavelength range
+  try {
+    const double lambdaTop = std::sin(horizonThetaR) *
+                             (lambda + bLambda / 2.0) /
+                             std::sin(horizonThetaR + gamma - bTwoTheta / 2.0);
+    const double lambdaBot = std::sin(horizonThetaR) *
+                             (lambda - bLambda / 2.0) /
+                             std::sin(horizonThetaR + gamma + bTwoTheta / 2.0);
+    lambdaVMin = std::min(lambdaTop, lambdaBot);
+    lambdaVMax = std::max(lambdaTop, lambdaBot);
+  } catch (std::exception &ex) {
+    throw std::runtime_error(
+        "Failed to project (lambda, twoTheta) = (" + std::to_string(lambda) +
+        "," + std::to_string(twoTheta * 180.0 / M_PI) + ") onto twoThetaR = " +
+        std::to_string(twoThetaRVal) + ": " + ex.what());
+  }
+}
+
+/**
+Check whether the spectra for the given workspaces are the same.
+
+@param ws1 : First workspace to compare
+@param ws2 : Second workspace to compare against
+@param severe : True to indicate that failure to verify should result in an
+exception. Otherwise a warning is generated.
+*/
+void ReflectometryReductionOne2::verifySpectrumMaps(
+    MatrixWorkspace_const_sptr ws1, MatrixWorkspace_const_sptr ws2,
+    const bool severe) {
+
+  bool mismatch = false;
+  // Check that the number of histograms is the same
+  if (ws1->getNumberHistograms() != ws2->getNumberHistograms()) {
+    mismatch = true;
+  }
+  // Check that the spectrum numbers match for each histogram
+  if (!mismatch) {
+    for (size_t i = 0; i < ws1->getNumberHistograms(); ++i) {
+      if (ws1->indexInfo().spectrumNumber(i) !=
+          ws2->indexInfo().spectrumNumber(i)) {
+        mismatch = true;
+        break;
+      }
+    }
+  }
+  // Report any mismatch as an error or a warning
+  if (mismatch) {
+    const std::string message =
+        "Spectrum maps between workspaces do NOT match up.";
+    if (severe) {
+      throw std::invalid_argument(message);
+    } else {
+      g_log.warning(message);
+    }
+  }
+}
 } // namespace Algorithms
 } // namespace Mantid
diff --git a/Framework/Algorithms/src/ReflectometryReductionOneAuto2.cpp b/Framework/Algorithms/src/ReflectometryReductionOneAuto2.cpp
index e5e8ebb3f421abe0807a01868c05b5c125c7d694..ddc0e55ed42e8198efee8b3a3c0401ed893c1f03 100644
--- a/Framework/Algorithms/src/ReflectometryReductionOneAuto2.cpp
+++ b/Framework/Algorithms/src/ReflectometryReductionOneAuto2.cpp
@@ -112,6 +112,9 @@ void ReflectometryReductionOneAuto2::init() {
           "InputWorkspace", "", Direction::Input, PropertyMode::Mandatory),
       "Input run in TOF or wavelength");
 
+  // Reduction type
+  initReductionProperties();
+
   // Analysis mode
   const std::vector<std::string> analysisMode{"PointDetectorAnalysis",
                                               "MultiDetectorAnalysis"};
@@ -228,7 +231,8 @@ void ReflectometryReductionOneAuto2::exec() {
   alg->initialize();
 
   // Mandatory properties
-
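+  // Forward the new summation and reduction type settings to the child
+  // reduction algorithm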
+  alg->setProperty("SummationType", getPropertyValue("SummationType"));
+  alg->setProperty("ReductionType", getPropertyValue("ReductionType"));
   double wavMin = checkForMandatoryInstrumentDefault<double>(
       this, "WavelengthMin", instrument, "LambdaMin");
   alg->setProperty("WavelengthMin", wavMin);
@@ -250,6 +254,7 @@ void ReflectometryReductionOneAuto2::exec() {
     // Calculate theta
     theta = calculateTheta(instructions, inputWS);
   }
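+  // ThetaIn is required by the child reduction to determine theta0 in the
+  // DivergentBeam sum-in-Q case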
+  alg->setProperty("ThetaIn", theta);
 
   // Optional properties
 
@@ -294,7 +299,10 @@ void ReflectometryReductionOneAuto2::exec() {
     setProperty("ScaleFactor", 1.0);
 }
 
-/** Returns the detectors of interest, specified via processing instructions
+/** Returns the detectors of interest, specified via processing instructions.
+* Note that this returns the names of the parent detectors of the first and
+* last spectrum indices in the processing instructions. It is assumed that all
+* the interim detectors have the same parent.
 *
 * @param instructions :: processing instructions defining detectors of interest
 * @param inputWS :: the input workspace
@@ -304,24 +312,30 @@ std::vector<std::string> ReflectometryReductionOneAuto2::getDetectorNames(
     const std::string &instructions, MatrixWorkspace_sptr inputWS) {
 
   std::vector<std::string> wsIndices;
-  boost::split(wsIndices, instructions, boost::is_any_of(":,-"));
+  boost::split(wsIndices, instructions, boost::is_any_of(":,-+"));
   // vector of comopnents
   std::vector<std::string> detectors;
 
-  for (const auto wsIndex : wsIndices) {
+  try {
+    for (const auto &wsIndex : wsIndices) {
 
-    size_t index = boost::lexical_cast<size_t>(wsIndex);
+      size_t index = boost::lexical_cast<size_t>(wsIndex);
 
-    auto detector = inputWS->getDetector(index);
-    auto parent = detector->getParent();
+      auto detector = inputWS->getDetector(index);
+      auto parent = detector->getParent();
 
-    if (parent) {
-      auto parentType = parent->type();
-      auto detectorName = (parentType == "Instrument") ? detector->getName()
-                                                       : parent->getName();
-      detectors.push_back(detectorName);
+      if (parent) {
+        auto parentType = parent->type();
+        auto detectorName = (parentType == "Instrument") ? detector->getName()
+                                                         : parent->getName();
+        detectors.push_back(detectorName);
+      }
     }
+  } catch (boost::bad_lexical_cast &) {
+    throw std::runtime_error("Invalid processing instructions: " +
+                             instructions);
   }
+
   return detectors;
 }
 
diff --git a/Framework/Algorithms/src/ReflectometryWorkflowBase2.cpp b/Framework/Algorithms/src/ReflectometryWorkflowBase2.cpp
index 7499ff6d92a8191970397a7d1d148dfc0361df69..9be7cfe924e54088b4b4566b55e619b4b08094cd 100644
--- a/Framework/Algorithms/src/ReflectometryWorkflowBase2.cpp
+++ b/Framework/Algorithms/src/ReflectometryWorkflowBase2.cpp
@@ -4,7 +4,9 @@
 #include "MantidAPI/WorkspaceUnitValidator.h"
 #include "MantidGeometry/Instrument.h"
 #include "MantidKernel/ArrayProperty.h"
+#include "MantidKernel/CompositeValidator.h"
 #include "MantidKernel/ListValidator.h"
+#include "MantidKernel/MandatoryValidator.h"
 #include "MantidKernel/RebinParamsValidator.h"
 #include "MantidKernel/Unit.h"
 
@@ -15,6 +17,23 @@ using namespace Mantid::Geometry;
 namespace Mantid {
 namespace Algorithms {
 
+/** Initialize properties related to the type of reduction
+*/
+void ReflectometryWorkflowBase2::initReductionProperties() {
+  // Summation type
+  std::vector<std::string> summationTypes = {"SumInLambda", "SumInQ"};
+  declareProperty("SummationType", "SumInLambda",
+                  boost::make_shared<StringListValidator>(summationTypes),
+                  "The type of summation to perform.", Direction::Input);
+
+  // Reduction type
+  std::vector<std::string> reductionTypes = {"Normal", "DivergentBeam",
+                                             "NonFlatSample"};
+  declareProperty("ReductionType", "Normal",
+                  boost::make_shared<StringListValidator>(reductionTypes),
+                  "The type of reduction to perform.", Direction::Input);
+}
+
 /** Initialize properties related to direct beam normalization
 */
 void ReflectometryWorkflowBase2::initDirectBeamProperties() {
@@ -178,6 +197,33 @@ void ReflectometryWorkflowBase2::initMomentumTransferProperties() {
                   "Factor you wish to scale Q workspace by.", Direction::Input);
 }
 
+/** Validate reduction properties, if given
+*
+* @return :: A map with results of validation
+*/
+std::map<std::string, std::string>
+ReflectometryWorkflowBase2::validateReductionProperties() const {
+
+  std::map<std::string, std::string> results;
+
+  // If summing in Q, then reduction type must be given
+  const std::string summationType = getProperty("SummationType");
+  const std::string reductionType = getProperty("ReductionType");
+  if (summationType == "SumInQ") {
+    if (reductionType == "Normal") {
+      results["ReductionType"] =
+          "ReductionType must be set if SummationType is SumInQ";
+    }
+  } else {
+    if (reductionType != "Normal") {
+      results["ReductionType"] =
+          "ReductionType should not be set unless SummationType is SumInQ";
+    }
+  }
+
+  return results;
+}
+
 /** Validate direct beam if given
 *
 * @return :: A map with results of validation
@@ -320,10 +366,12 @@ ReflectometryWorkflowBase2::cropWavelength(MatrixWorkspace_sptr inputWS) {
 /** Process an input workspace in TOF according to specified processing commands
 * to get a detector workspace in wavelength.
 * @param inputWS :: the input workspace in TOF
+* @param convert :: whether the result should be converted to wavelength
 * @return :: the detector workspace in wavelength
 */
 MatrixWorkspace_sptr
-ReflectometryWorkflowBase2::makeDetectorWS(MatrixWorkspace_sptr inputWS) {
+ReflectometryWorkflowBase2::makeDetectorWS(MatrixWorkspace_sptr inputWS,
+                                           const bool convert) {
 
   const std::string processingCommands =
       getPropertyValue("ProcessingInstructions");
@@ -334,7 +382,9 @@ ReflectometryWorkflowBase2::makeDetectorWS(MatrixWorkspace_sptr inputWS) {
   groupAlg->execute();
   MatrixWorkspace_sptr detectorWS = groupAlg->getProperty("OutputWorkspace");
 
-  detectorWS = convertToWavelength(detectorWS);
+  if (convert) {
+    detectorWS = convertToWavelength(detectorWS);
+  }
 
   return detectorWS;
 }
diff --git a/Framework/Algorithms/src/SampleCorrections/RectangularBeamProfile.cpp b/Framework/Algorithms/src/SampleCorrections/RectangularBeamProfile.cpp
index b3aa0b2168d826c9b6c507eb66a2176a538befa2..5503e62b37bc6c196efa3b28c478ed43d302d248 100644
--- a/Framework/Algorithms/src/SampleCorrections/RectangularBeamProfile.cpp
+++ b/Framework/Algorithms/src/SampleCorrections/RectangularBeamProfile.cpp
@@ -90,10 +90,10 @@ RectangularBeamProfile::defineActiveRegion(const API::Sample &sample) const {
   const auto &sampleMin(sampleBox.minPoint());
   const auto &sampleMax(sampleBox.maxPoint());
   V3D minPoint, maxPoint;
-  minPoint[m_horIdx] = m_min[m_horIdx];
-  maxPoint[m_horIdx] = m_min[m_horIdx] + m_width;
-  minPoint[m_upIdx] = m_min[m_upIdx];
-  maxPoint[m_upIdx] = m_min[m_upIdx] + m_height;
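+  // Clamp the beam extents to the sample bounding box so that the active
+  // region never extends beyond the sample itself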
+  minPoint[m_horIdx] = std::max(sampleMin[m_horIdx], m_min[m_horIdx]);
+  maxPoint[m_horIdx] = std::min(sampleMax[m_horIdx], m_min[m_horIdx] + m_width);
+  minPoint[m_upIdx] = std::max(sampleMin[m_upIdx], m_min[m_upIdx]);
+  maxPoint[m_upIdx] = std::min(sampleMax[m_upIdx], m_min[m_upIdx] + m_height);
   minPoint[m_beamIdx] = sampleMin[m_beamIdx];
   maxPoint[m_beamIdx] = sampleMax[m_beamIdx];
 
diff --git a/Framework/Algorithms/src/SmoothNeighbours.cpp b/Framework/Algorithms/src/SmoothNeighbours.cpp
index f611844678ece60a66e0ff007acaf072f14c5e3c..c49e29e6d6c6884d3fe7bbfcc7f3cc1c059e4ad7 100644
--- a/Framework/Algorithms/src/SmoothNeighbours.cpp
+++ b/Framework/Algorithms/src/SmoothNeighbours.cpp
@@ -1,7 +1,7 @@
 #include "MantidAlgorithms/SmoothNeighbours.h"
 #include "MantidAPI/DetectorInfo.h"
 #include "MantidAPI/InstrumentValidator.h"
-#include "MantidAPI/NearestNeighbourInfo.h"
+#include "MantidAPI/WorkspaceNearestNeighbourInfo.h"
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidAPI/WorkspaceFactory.h"
 #include "MantidDataObjects/EventList.h"
@@ -329,7 +329,8 @@ void SmoothNeighbours::findNeighboursUbiqutious() {
   m_neighbours.resize(inWS->getNumberHistograms());
 
   bool ignoreMaskedDetectors = getProperty("IgnoreMaskedDetectors");
-  NearestNeighbourInfo neighbourInfo(*inWS, ignoreMaskedDetectors, nNeighbours);
+  WorkspaceNearestNeighbourInfo neighbourInfo(*inWS, ignoreMaskedDetectors,
+                                              nNeighbours);
 
   // Cull by radius
   RadiusFilter radiusFilter(Radius);
diff --git a/Framework/Algorithms/src/SofQWNormalisedPolygon.cpp b/Framework/Algorithms/src/SofQWNormalisedPolygon.cpp
index cf79fb878c1e0b106a7751805aefd53d5260ca7d..fcb3b55fb15fd25e088fee3cb2308ceaa1791546 100644
--- a/Framework/Algorithms/src/SofQWNormalisedPolygon.cpp
+++ b/Framework/Algorithms/src/SofQWNormalisedPolygon.cpp
@@ -1,7 +1,7 @@
 #include "MantidAlgorithms/SofQWNormalisedPolygon.h"
 #include "MantidAlgorithms/SofQW.h"
 #include "MantidAPI/BinEdgeAxis.h"
-#include "MantidAPI/NearestNeighbourInfo.h"
+#include "MantidAPI/WorkspaceNearestNeighbourInfo.h"
 #include "MantidAPI/SpectrumDetectorMapping.h"
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidAPI/WorkspaceFactory.h"
@@ -334,7 +334,8 @@ void SofQWNormalisedPolygon::initAngularCachesPSD(
 
   bool ignoreMasked = true;
   const int numNeighbours = 4;
-  NearestNeighbourInfo neighbourInfo(*workspace, ignoreMasked, numNeighbours);
+  WorkspaceNearestNeighbourInfo neighbourInfo(*workspace, ignoreMasked,
+                                              numNeighbours);
 
   this->m_theta = std::vector<double>(nHistos);
   this->m_thetaWidths = std::vector<double>(nHistos);
diff --git a/Framework/Algorithms/src/SpatialGrouping.cpp b/Framework/Algorithms/src/SpatialGrouping.cpp
index d41ff40179f70db563b0822b38d3b78853d6f23c..19d779c3a8f40b202efe9b4dc93bebc6643f7ffc 100644
--- a/Framework/Algorithms/src/SpatialGrouping.cpp
+++ b/Framework/Algorithms/src/SpatialGrouping.cpp
@@ -82,7 +82,7 @@ void SpatialGrouping::exec() {
   Mantid::API::Progress prog(this, 0.0, 1.0, m_positions.size());
 
   bool ignoreMaskedDetectors = false;
-  m_neighbourInfo = Kernel::make_unique<API::NearestNeighbourInfo>(
+  m_neighbourInfo = Kernel::make_unique<API::WorkspaceNearestNeighbourInfo>(
       *inputWorkspace, ignoreMaskedDetectors);
 
   for (size_t i = 0; i < inputWorkspace->getNumberHistograms(); ++i) {
diff --git a/Framework/Algorithms/test/AnnularRingAbsorptionTest.h b/Framework/Algorithms/test/AnnularRingAbsorptionTest.h
index c3131a7605dfc62b27e0a04b6682aa016d4bf9a6..368464508ac3f10825c77ffae089bb965b12668d 100644
--- a/Framework/Algorithms/test/AnnularRingAbsorptionTest.h
+++ b/Framework/Algorithms/test/AnnularRingAbsorptionTest.h
@@ -43,11 +43,11 @@ public:
     MatrixWorkspace_sptr outWS = alg->getProperty("OutputWorkspace");
     TS_ASSERT(outWS);
 
-    const double delta(1e-08);
+    const double delta(1e-04);
     const size_t middle_index = 4;
-    TS_ASSERT_DELTA(0.96859812, outWS->readY(0).front(), delta);
-    TS_ASSERT_DELTA(0.79254304, outWS->readY(0)[middle_index], delta);
-    TS_ASSERT_DELTA(0.67064972, outWS->readY(0).back(), delta);
+    TS_ASSERT_DELTA(0.9694, outWS->readY(0).front(), delta);
+    TS_ASSERT_DELTA(0.8035, outWS->readY(0)[middle_index], delta);
+    TS_ASSERT_DELTA(0.6530, outWS->readY(0).back(), delta);
   }
 
   //-------------------- Failure cases --------------------------------
diff --git a/Framework/Algorithms/test/ConvertUnitsTest.h b/Framework/Algorithms/test/ConvertUnitsTest.h
index 1b493a0c46d4b31381e56de29052160fcf4d19d7..95460c9c0f873323e301e441b8ee8a705e57c947 100644
--- a/Framework/Algorithms/test/ConvertUnitsTest.h
+++ b/Framework/Algorithms/test/ConvertUnitsTest.h
@@ -608,6 +608,23 @@ public:
     // Check EMode has been set
     TS_ASSERT_EQUALS(Mantid::Kernel::DeltaEMode::Direct, output->getEMode());
 
+    ConvertUnits conv4;
+    conv4.initialize();
+    conv4.setProperty("InputWorkspace", ws);
+    conv4.setPropertyValue("OutputWorkspace", outputSpace);
+    conv4.setPropertyValue("Target", "dSpacingPerpendicular");
+    conv4.setPropertyValue("Emode", "Direct");
+    conv4.execute();
+
+    TS_ASSERT_THROWS_NOTHING(
+        output = AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>(
+            outputSpace));
+    TS_ASSERT_EQUALS(output->getAxis(0)->unit()->unitID(),
+                     "dSpacingPerpendicular");
+    TS_ASSERT_EQUALS(output->blocksize(), 2663);
+    // Check EMode has been set
+    TS_ASSERT_EQUALS(Mantid::Kernel::DeltaEMode::Direct, output->getEMode());
+
     AnalysisDataService::Instance().remove(outputSpace);
   }
 
diff --git a/Framework/Algorithms/test/FindEPPTest.h b/Framework/Algorithms/test/FindEPPTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc12ab85b14d20e3fa71264cfb2750a839499cba
--- /dev/null
+++ b/Framework/Algorithms/test/FindEPPTest.h
@@ -0,0 +1,257 @@
+#ifndef MANTID_ALGORITHMS_FINDEPPTEST_H_
+#define MANTID_ALGORITHMS_FINDEPPTEST_H_
+
+#include <cxxtest/TestSuite.h>
+
+#include "MantidAlgorithms/FindEPP.h"
+#include "MantidAlgorithms/CreateSampleWorkspace.h"
+
+#include "MantidAPI/AnalysisDataService.h"
+#include "MantidAPI/FrameworkManager.h"
+#include "MantidAPI/ITableWorkspace.h"
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidAPI/WorkspaceFactory.h"
+
+using namespace Mantid::Algorithms;
+using namespace Mantid::API;
+
+namespace {
+enum WorkspaceType : size_t {
+  NegativeMaximum = 0,
+  NarrowPeak = 1,
+  FitFailed = 2,
+  Success = 3,
+  Performance = 4
+};
+
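+// Build a small synthetic workspace tailored to exercise the FindEPP code
+// path named by the given WorkspaceType (see the switch below)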
+MatrixWorkspace_sptr _create_test_workspace(WorkspaceType type) {
+
+  CreateSampleWorkspace createAlg;
+
+  if (type != NegativeMaximum) {
+    createAlg.initialize();
+    createAlg.setProperty("BankPixelWidth", 1);
+    createAlg.setPropertyValue("OutputWorkspace", "__ws");
+    createAlg.setLogging(false);
+    createAlg.setChild(true);
+  }
+
+  switch (type) {
+
+  case NegativeMaximum: {
+    size_t nBins = 5;
+    MatrixWorkspace_sptr result =
+        WorkspaceFactory::Instance().create("Workspace2D", 1, nBins, nBins);
+    for (size_t bin = 0; bin < nBins; ++bin) {
+      result->mutableY(0)[bin] = -1.;
+      result->mutableX(0)[bin] = double(bin);
+    }
+    return result;
+  }
+
+  case NarrowPeak: {
+    createAlg.setPropertyValue("Function", "User Defined");
+    createAlg.setPropertyValue(
+        "UserDefinedFunction",
+        "name=Gaussian, PeakCentre=5, Height=1, Sigma=0.05");
+    createAlg.setProperty("XMin", 0.);
+    createAlg.setProperty("XMax", 10.);
+    createAlg.setProperty("BinWidth", 0.1);
+    createAlg.setProperty("NumBanks", 1);
+    break;
+  }
+
+  case FitFailed: {
+    createAlg.setPropertyValue("Function", "Exp Decay");
+    createAlg.setProperty("XMin", 0.);
+    createAlg.setProperty("XMax", 100.);
+    createAlg.setProperty("BinWidth", 1.);
+    createAlg.setProperty("NumBanks", 1);
+    break;
+  }
+
+  case Success: {
+    createAlg.setPropertyValue("Function", "User Defined");
+    createAlg.setPropertyValue("UserDefinedFunction",
+                               "name=LinearBackground,A0=0.3;"
+                               "name=Gaussian,"
+                               "PeakCentre=6000, Height=5, Sigma=75");
+    createAlg.setProperty("XMin", 4005.75);
+    createAlg.setProperty("XMax", 7995.75);
+    createAlg.setProperty("BinWidth", 10.5);
+    createAlg.setProperty("NumBanks", 2);
+    break;
+  }
+
+  case Performance: {
+    createAlg.setPropertyValue("Function", "User Defined");
+    createAlg.setPropertyValue("UserDefinedFunction",
+                               "name=LinearBackground,A0=0.3,A1=0.001;"
+                               "name=Gaussian,"
+                               "PeakCentre=6000, Height=5, Sigma=75");
+    createAlg.setProperty("XMin", 4005.75);
+    createAlg.setProperty("XMax", 7995.75);
+    createAlg.setProperty("BinWidth", 5.01);
+    createAlg.setProperty("NumBanks", 100);
+    createAlg.setProperty("BankPixelWidth", 10);
+    createAlg.setProperty("Random", true);
+    break;
+  }
+  }
+
+  createAlg.execute();
+  return createAlg.getProperty("OutputWorkspace");
+}
+}
+
+class FindEPPTest : public CxxTest::TestSuite {
+public:
+  // This pair of boilerplate methods prevent the suite being created statically
+  // This means the constructor isn't called when running other tests
+  static FindEPPTest *createSuite() { return new FindEPPTest(); }
+  static void destroySuite(FindEPPTest *suite) { delete suite; }
+
+  FindEPPTest()
+      : m_columnNames({"WorkspaceIndex", "PeakCentre", "PeakCentreError",
+                       "Sigma", "SigmaError", "Height", "HeightError", "chiSq",
+                       "FitStatus"}),
+        m_delta(1E-4) {
+    FrameworkManager::Instance();
+  }
+
+  void test_init() {
+    FindEPP alg;
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT(alg.isInitialized());
+  }
+
+  void test_success() {
+    MatrixWorkspace_sptr inputWS = _create_test_workspace(Success);
+
+    FindEPP alg;
+    alg.setChild(true);
+    alg.setLogging(false);
+
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "__unused_for_child"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+    TS_ASSERT(alg.isExecuted());
+
+    ITableWorkspace_sptr outputWS = alg.getProperty("OutputWorkspace");
+    _check_table(outputWS, 2);
+
+    for (size_t row = 0; row < 2; ++row) {
+      TS_ASSERT_EQUALS(outputWS->cell<std::string>(row, 8), "success");
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 1), 6005.25, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 2), 8.817, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 3), 89.3248, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 4), 7.2306, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 5), 4.8384, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 6), 0.6161, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 7), 0.1643, m_delta);
+    }
+  }
+
+  void test_negativeMaximum() {
+    MatrixWorkspace_sptr inputWS = _create_test_workspace(NegativeMaximum);
+
+    FindEPP alg;
+    alg.setChild(true);
+    alg.setLogging(false);
+
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "__unused_for_child"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+    TS_ASSERT(alg.isExecuted());
+
+    ITableWorkspace_sptr outputWS = alg.getProperty("OutputWorkspace");
+    _check_table(outputWS, 1);
+
+    TS_ASSERT_EQUALS(outputWS->cell<std::string>(0, 8), "negativeMaximum");
+    TS_ASSERT_DELTA(outputWS->cell<double>(0, 1), 0., m_delta);
+  }
+
+  void test_narrowPeak() {
+    MatrixWorkspace_sptr inputWS = _create_test_workspace(NarrowPeak);
+
+    FindEPP alg;
+    alg.setChild(true);
+    alg.setLogging(false);
+
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "__unused_for_child"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+    TS_ASSERT(alg.isExecuted());
+
+    ITableWorkspace_sptr outputWS = alg.getProperty("OutputWorkspace");
+    _check_table(outputWS, 1);
+
+    TS_ASSERT_EQUALS(outputWS->cell<std::string>(0, 8), "narrowPeak");
+    TS_ASSERT_DELTA(outputWS->cell<double>(0, 1), 5., m_delta);
+  }
+
+  void test_fitFailed() {
+    MatrixWorkspace_sptr inputWS = _create_test_workspace(FitFailed);
+
+    FindEPP alg;
+    alg.setChild(true);
+    alg.setLogging(false);
+
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "__unused_for_child"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+    TS_ASSERT(alg.isExecuted());
+
+    ITableWorkspace_sptr outputWS = alg.getProperty("OutputWorkspace");
+    _check_table(outputWS, 1);
+
+    TS_ASSERT_EQUALS(outputWS->cell<std::string>(0, 8), "fitFailed");
+    TS_ASSERT_DELTA(outputWS->cell<double>(0, 1), 0., m_delta);
+  }
+
+private:
+  void _check_table(ITableWorkspace_sptr ws, size_t nSpectra) {
+    TS_ASSERT_EQUALS(ws->rowCount(), nSpectra);
+    TS_ASSERT_EQUALS(ws->columnCount(), 9);
+    TS_ASSERT_EQUALS(ws->getColumnNames(), m_columnNames);
+  }
+  std::vector<std::string> m_columnNames;
+  double m_delta;
+};
+
+class FindEPPTestPerformance : public CxxTest::TestSuite {
+public:
+  static FindEPPTestPerformance *createSuite() {
+    return new FindEPPTestPerformance();
+  }
+  static void destroySuite(FindEPPTestPerformance *suite) { delete suite; }
+
+  FindEPPTestPerformance() {}
+
+  void setUp() override {
+    FrameworkManager::Instance();
+    MatrixWorkspace_sptr in = _create_test_workspace(Performance);
+    m_alg.initialize();
+    m_alg.setProperty("InputWorkspace", in);
+    m_alg.setProperty("OutputWorkspace", "__out_ws");
+  }
+
+  void tearDown() override {
+    AnalysisDataService::Instance().remove("__out_ws");
+  }
+
+  void test_performance() { m_alg.execute(); }
+
+private:
+  FindEPP m_alg;
+};
+
+#endif /* MANTID_ALGORITHMS_FINDEPPTEST_H_ */
diff --git a/Framework/Algorithms/test/RectangularBeamProfileTest.h b/Framework/Algorithms/test/RectangularBeamProfileTest.h
index 973a622ca00d9708c449a1db986fc6b036147363..32b043458930dc56387350a7714a069d0ab64981 100644
--- a/Framework/Algorithms/test/RectangularBeamProfileTest.h
+++ b/Framework/Algorithms/test/RectangularBeamProfileTest.h
@@ -82,7 +82,22 @@ public:
     TS_ASSERT_EQUALS(V3D(1.0, 0, 0), ray.unitDir);
   }
 
-  void test_DefineActiveRegion() {
+  void test_DefineActiveRegion_beam_larger_than_sample() {
+    using Mantid::API::Sample;
+    using Mantid::Kernel::V3D;
+    const double width(3.3), height(6.9);
+    const V3D center;
+    RectangularBeamProfile profile(createTestFrame(), center, width, height);
+    Sample testSample;
+    testSample.setShape(*ComponentCreationHelper::createSphere(0.5));
+
+    auto region = profile.defineActiveRegion(testSample);
+    TS_ASSERT(region.isNonNull());
+    TS_ASSERT_EQUALS(V3D(-0.5, -0.5, -0.5), region.minPoint());
+    TS_ASSERT_EQUALS(V3D(0.5, 0.5, 0.5), region.maxPoint());
+  }
+
+  void test_DefineActiveRegion_beam_smaller_than_sample() {
     using Mantid::API::Sample;
     using Mantid::Kernel::V3D;
     const double width(0.1), height(0.2);
diff --git a/Framework/Algorithms/test/ReflectometryReductionOne2Test.h b/Framework/Algorithms/test/ReflectometryReductionOne2Test.h
index c1cb58710a78e1e39f6d2d211f5b132b72a725c2..e4264b1458229f2be965a0a3821933257d707609 100644
--- a/Framework/Algorithms/test/ReflectometryReductionOne2Test.h
+++ b/Framework/Algorithms/test/ReflectometryReductionOne2Test.h
@@ -19,7 +19,7 @@ using namespace WorkspaceCreationHelper;
 class ReflectometryReductionOne2Test : public CxxTest::TestSuite {
 private:
   MatrixWorkspace_sptr m_multiDetectorWS;
-  MatrixWorkspace_sptr m_wavelengthWS;
+  MatrixWorkspace_sptr m_transmissionWS;
 
 public:
   // This pair of boilerplate methods prevent the suite being created statically
@@ -35,11 +35,21 @@ public:
     FrameworkManager::Instance();
     // A multi detector ws
     m_multiDetectorWS =
-        create2DWorkspaceWithReflectometryInstrumentMultiDetector();
-    // A workspace in wavelength
-    m_wavelengthWS =
-        create2DWorkspaceWithReflectometryInstrumentMultiDetector();
-    m_wavelengthWS->getAxis(0)->setUnit("Wavelength");
+        create2DWorkspaceWithReflectometryInstrumentMultiDetector(0, 0.1);
+    // A transmission ws with different spectrum numbers to the run
+    m_transmissionWS =
+        create2DWorkspaceWithReflectometryInstrumentMultiDetector(0, 0.1);
+    m_transmissionWS->getSpectrum(0).setSpectrumNo(2);
+    m_transmissionWS->getSpectrum(1).setSpectrumNo(3);
+    m_transmissionWS->getSpectrum(2).setSpectrumNo(4);
+    m_transmissionWS->getSpectrum(3).setSpectrumNo(5);
+    // Set different values in each spectrum so that we can check the correct
+    // spectra were used for the transmission correction
+    using namespace Mantid::HistogramData;
+    m_transmissionWS->setCounts(0, Counts(m_transmissionWS->y(0).size(), 10));
+    m_transmissionWS->setCounts(1, Counts(m_transmissionWS->y(1).size(), 20));
+    m_transmissionWS->setCounts(2, Counts(m_transmissionWS->y(2).size(), 30));
+    m_transmissionWS->setCounts(3, Counts(m_transmissionWS->y(3).size(), 40));
   }
 
   void test_IvsLam() {
@@ -47,22 +57,10 @@ public:
     // No monitor normalization
     // No direct beam normalization
     // No transmission correction
-
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setPropertyValue("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT(outLam);
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT(outLam->x(0)[0] >= 1.5);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     TS_ASSERT_DELTA(outLam->y(0)[0], 2.0000, 0.0001);
@@ -77,19 +75,9 @@ public:
     // Processing instructions : 1+2
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setPropertyValue("ProcessingInstructions", "1+2");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithm(alg, 1.5, 15.0, "1+2");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT(outLam->x(0)[0] >= 1.5);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     // Y counts, should be 2.0000 * 2
@@ -105,19 +93,9 @@ public:
     // Processing instructions : 1-3
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setPropertyValue("ProcessingInstructions", "1-3");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithm(alg, 1.5, 15.0, "1-3");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT(outLam->x(0)[0] >= 1.5);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     // Y counts, should be 2.0000 * 3
@@ -125,20 +103,72 @@ public:
     TS_ASSERT_DELTA(outLam->y(0)[7], 6.0000, 0.0001);
   }
 
+  void test_IvsLam_multiple_detector_groups() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // Processing instructions : 2,1+3 (two separate groups)
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2,1+3");
+    // Run the algorithm. There should be 2 output histograms, one for each
+    // input group. Note that the group order is swapped from the input order
+    // because they are sorted by the first spectrum number in the group,
+    // i.e. as if the input was "1+3,2"
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 14, 2);
+
+    TS_ASSERT(outLam->x(0)[0] >= 1.5);
+    TS_ASSERT(outLam->x(0)[7] <= 15.0);
+    TS_ASSERT(outLam->x(1)[0] >= 1.5);
+    TS_ASSERT(outLam->x(1)[7] <= 15.0);
+    // Y counts, should be 2.0000 * 2 for first group, 2.0000 * 1 for second.
+    TS_ASSERT_DELTA(outLam->y(0)[0], 4.0000, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 4.0000, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(1)[0], 2.0000, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(1)[7], 2.0000, 0.0001);
+  }
+
   void test_bad_processing_instructions() {
     // Processing instructions : 5+6
 
-    auto alg = AlgorithmManager::Instance().create("ReflectometryReductionOne");
-    alg->setChild(true);
-    alg->initialize();
-    alg->setProperty("InputWorkspace", m_multiDetectorWS);
-    alg->setProperty("WavelengthMin", 1.5);
-    alg->setProperty("WavelengthMax", 15.0);
-    alg->setPropertyValue("OutputWorkspace", "IvsQ");
-    alg->setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg->setPropertyValue("ProcessingInstructions", "5+6");
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "5+6");
     // Must throw as spectrum 2 is not defined
-    TS_ASSERT_THROWS_ANYTHING(alg->execute());
+    TS_ASSERT_THROWS_ANYTHING(alg.execute());
+  }
+
+  void test_sum_in_lambda() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // SummationType : SumInLambda (same as default)
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setProperty("SummationType", "SumInLambda");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
+
+    TS_ASSERT(outLam->x(0)[0] >= 1.5);
+    TS_ASSERT(outLam->x(0)[7] <= 15.0);
+    TS_ASSERT_DELTA(outLam->y(0)[0], 2.0000, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 2.0000, 0.0001);
+  }
+
+  void test_sum_in_lambda_with_bad_reduction_type() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // SummationType : SumInLambda (same as default)
+    // ReductionType : DivergentBeam (invalid)
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setProperty("SummationType", "SumInLambda");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    TS_ASSERT_THROWS_ANYTHING(alg.execute());
   }
 
   void test_IvsLam_direct_beam() {
@@ -146,41 +176,22 @@ public:
     // No monitor normalization
     // Direct beam normalization: 2-3
     // No transmission correction
-    // Processing instructions : 1
+    // Processing instructions : 2
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setPropertyValue("ProcessingInstructions", "1");
+    setupAlgorithm(alg, 1.5, 15.0, "2");
     alg.setPropertyValue("RegionOfDirectBeam", "2-3");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
-    // Y counts, should be 0.5 = 1 (from detector ws) / 2 (from direct beam)
-    TS_ASSERT_DELTA(outLam->y(0)[0], 0.5, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[0], 0.4991, 0.0001);
   }
 
   void test_bad_direct_beam() {
     // Direct beam : 4-5
-
-    auto alg = AlgorithmManager::Instance().create("ReflectometryReductionOne");
-    alg->setChild(true);
-    alg->initialize();
-    alg->setProperty("InputWorkspace", m_multiDetectorWS);
-    alg->setProperty("WavelengthMin", 1.5);
-    alg->setProperty("WavelengthMax", 15.0);
-    alg->setPropertyValue("ProcessingInstructions", "1");
-    alg->setPropertyValue("OutputWorkspace", "IvsQ");
-    alg->setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg->setPropertyValue("RegionOfDirectBeam", "4-5");
-    TS_ASSERT_THROWS_ANYTHING(alg->execute());
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setPropertyValue("RegionOfDirectBeam", "4-5");
+    TS_ASSERT_THROWS_ANYTHING(alg.execute());
   }
 
   void test_IvsLam_no_monitors() {
@@ -195,20 +206,10 @@ public:
     // MonitorBackgroundWavelengthMax : Not given
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
+    setupAlgorithm(alg, 1.5, 15.0, "1");
     alg.setProperty("I0MonitorIndex", "0");
-    alg.setPropertyValue("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT(outLam->x(0)[0] >= 1.5);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     // No monitors considered because MonitorBackgroundWavelengthMin
@@ -223,7 +224,7 @@ public:
     // Monitor normalization
     // No direct beam normalization
     // No transmission correction
-    // Processing instructions : 1
+    // Processing instructions : 2
 
     // I0MonitorIndex: 0
     // MonitorBackgroundWavelengthMin : 0.5
@@ -233,27 +234,13 @@ public:
     // Modify counts in monitor (only for this test)
     // Modify counts only for range that will be fitted
     auto inputWS = m_multiDetectorWS;
-    auto &Y = inputWS->mutableY(0);
+    auto &Y = m_multiDetectorWS->mutableY(0);
     std::fill(Y.begin(), Y.begin() + 2, 1.0);
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", inputWS);
-    alg.setProperty("WavelengthMin", 0.0);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("I0MonitorIndex", "0");
-    alg.setProperty("MonitorBackgroundWavelengthMin", 0.5);
-    alg.setProperty("MonitorBackgroundWavelengthMax", 3.0);
-    alg.setProperty("NormalizeByIntegratedMonitors", "0");
-    alg.setPropertyValue("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithmMonitorCorrection(alg, 0.0, 15.0, "2", inputWS, false);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 10);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 10);
     TS_ASSERT(outLam->x(0)[0] >= 0.0);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     // Expected values are 2.4996 = 3.15301 (detectors) / 1.26139 (monitors)
@@ -281,25 +268,9 @@ public:
     std::fill(Y.begin(), Y.begin() + 2, 1.0);
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", inputWS);
-    alg.setProperty("WavelengthMin", 0.0);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("I0MonitorIndex", "0");
-    alg.setProperty("MonitorBackgroundWavelengthMin", 0.5);
-    alg.setProperty("MonitorBackgroundWavelengthMax", 3.0);
-    alg.setProperty("NormalizeByIntegratedMonitors", "1");
-    alg.setProperty("MonitorIntegrationWavelengthMin", 1.5);
-    alg.setProperty("MonitorIntegrationWavelengthMax", 15.0);
-    alg.setPropertyValue("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithmMonitorCorrection(alg, 0.0, 15.0, "1", inputWS, true);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 16);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 16);
     TS_ASSERT(outLam->x(0)[0] >= 0.0);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     // Expected values are 0.1981 = 2.0000 (detectors) / (1.26139*8) (monitors)
@@ -311,20 +282,10 @@ public:
     // Transmission run is the same as input run
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("FirstTransmissionRun", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "1", m_multiDetectorWS,
+                                         false);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     // Expected values are 1 = m_wavelength / m_wavelength
     TS_ASSERT_DELTA(outLam->y(0)[0], 1.0000, 0.0001);
     TS_ASSERT_DELTA(outLam->y(0)[7], 1.0000, 0.0001);
@@ -334,49 +295,67 @@ public:
     // Transmission run is the same as input run
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("FirstTransmissionRun", m_multiDetectorWS);
-    alg.setProperty("SecondTransmissionRun", m_multiDetectorWS);
-    alg.setProperty("StartOverlap", 2.5);
-    alg.setProperty("EndOverlap", 3.0);
-    alg.setProperty("Params", "0.1");
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "1", m_multiDetectorWS,
+                                         true);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     // Expected values are 1 = m_wavelength / m_wavelength
     TS_ASSERT_DELTA(outLam->y(0)[0], 1.0000, 0.0001);
     TS_ASSERT_DELTA(outLam->y(0)[7], 1.0000, 0.0001);
   }
 
+  void test_transmission_correction_with_mapped_spectra() {
+    // Run workspace spectrum numbers are 1,2,3,4.
+    // Transmission workspace has spectrum numbers 2,3,4,5.
+    // Processing instructions 2-3 in the run workspace map to
+    // spectra 3-4, which map to indices 1-2 in the transmission
+    // workspace.
+    ReflectometryReductionOne2 alg;
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "2-3",
+                                         m_transmissionWS, true);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
+
+    TS_ASSERT_DELTA(outLam->y(0)[0], 0.0807, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 0.0802, 0.0001);
+  }
+
+  void test_transmission_correction_with_bad_mapped_spectra() {
+    // Run workspace spectrum numbers are 1,2,3,4.
+    // Transmission workspace has spectrum numbers 2,3,4,5.
+    // Processing instruction 0 in the run workspace maps to
+    // spectrum 1, which doesn't exist in the transmission
+    // workspace.
+    ReflectometryReductionOne2 alg;
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "0", m_transmissionWS,
+                                         true);
+    TS_ASSERT_THROWS_ANYTHING(alg.execute());
+  }
+
+  void test_transmission_correction_with_different_spectra() {
+    // Run workspace spectrum numbers are 1,2,3,4.  Transmission workspace has
+    // spectrum numbers 2,3,4,5.  Processing instructions 2,3 are used in the
+    // run and transmission workspaces without any mapping, i.e. spectra 3-4 in
+    // the run and spectra 4-5 in the transmission workspace are used.
+    ReflectometryReductionOne2 alg;
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "2-3",
+                                         m_transmissionWS, true);
+    alg.setProperty("StrictSpectrumChecking", "0");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
+
+    TS_ASSERT_DELTA(outLam->y(0)[0], 0.0571, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 0.0571, 0.0001);
+  }
+
   void test_exponential_correction() {
     // CorrectionAlgorithm: ExponentialCorrection
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("ProcessingInstructions", "1");
+    setupAlgorithm(alg, 1.5, 15.0, "2");
     alg.setProperty("CorrectionAlgorithm", "ExponentialCorrection");
     alg.setProperty("C0", 0.2);
     alg.setProperty("C1", 0.1);
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT_DELTA(outLam->y(0)[0], 12.5113, 0.0001);
     TS_ASSERT_DELTA(outLam->y(0)[7], 23.4290, 0.0001);
   }
@@ -385,44 +364,311 @@ public:
     // CorrectionAlgorithm: PolynomialCorrection
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("ProcessingInstructions", "1");
+    setupAlgorithm(alg, 1.5, 15.0, "2");
     alg.setProperty("CorrectionAlgorithm", "PolynomialCorrection");
     alg.setProperty("Polynomial", "0.1,0.3,0.5");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT_DELTA(outLam->y(0)[0], 0.6093, 0.0001);
     TS_ASSERT_DELTA(outLam->y(0)[7], 0.0514, 0.0001);
   }
 
   void test_IvsQ() {
+    // Test IvsQ workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // Processing instructions : 2
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2");
+    MatrixWorkspace_sptr outQ = runAlgorithmQ(alg);
+
+    // X range in outQ
+    TS_ASSERT_DELTA(outQ->x(0)[0], 0.3353, 0.0001);
+    TS_ASSERT_DELTA(outQ->x(0)[7], 0.5962, 0.0001);
+    // Y counts
+    TS_ASSERT_DELTA(outQ->y(0)[0], 2.0000, 0.0001);
+    TS_ASSERT_DELTA(outQ->y(0)[7], 2.0000, 0.0001);
+  }
+
+  void test_IvsQ_multiple_detector_groups() {
+    // Test IvsQ workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // Processing instructions : 2,1+3 (two separate groups)
 
     ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2,1+3");
+    // Run the algorithm. There should be 2 output histograms, one for each
+    // input group. Note that the group order is swapped from the input order
+    // because they are sorted by the first spectrum number in the group,
+    // i.e. as if the input was "1+3,2"
+    MatrixWorkspace_sptr outQ = runAlgorithmQ(alg, 14, 2);
+
+    // X range in outQ
+    TS_ASSERT_DELTA(outQ->x(0)[0], 0.3353, 0.0001);
+    TS_ASSERT_DELTA(outQ->x(0)[7], 0.5961, 0.0001);
+    TS_ASSERT_DELTA(outQ->x(1)[0], 0.3353, 0.0001);
+    TS_ASSERT_DELTA(outQ->x(1)[7], 0.5962, 0.0001);
+    // Y counts, should be 2.0000 * 2 for first group, 2.0000 * 1 for second.
+    TS_ASSERT_DELTA(outQ->y(0)[0], 4.0000, 0.0001);
+    TS_ASSERT_DELTA(outQ->y(0)[7], 4.0000, 0.0001);
+    TS_ASSERT_DELTA(outQ->y(1)[0], 2.0000, 0.0001);
+    TS_ASSERT_DELTA(outQ->y(1)[7], 2.0000, 0.0001);
+  }
+
+  void test_sum_in_q_with_bad_reduction_type() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // SummationType : SumInQ
+    // ReductionType : not set (invalid)
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setProperty("SummationType", "SumInQ");
+    TS_ASSERT_THROWS_ANYTHING(alg.execute());
+  }
+
+  void test_sum_in_q_divergent_beam() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // SummationType : SumInQ
+    // ReductionType : DivergentBeam
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->x(0)[0], 1.5338, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[7], 6.5622, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[10], 8.7173, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[17], 13.7457, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[0], 1.8323, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 1.7985, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[10], 2.0212, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[17], 1.9430, 0.0001);
+  }
+
+  void test_sum_in_q_non_flat_sample() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // SummationType : SumInQ
+    // ReductionType : NonFlatSample
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "NonFlatSample");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->x(0)[0], 1.5339, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[7], 6.5110, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[10], 8.6440, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[17], 13.6211, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[0], 1.8386, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 1.6622, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[10], 1.9205, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[17], 1.7303, 0.0001);
+  }
+
+  void test_sum_in_q_direct_beam() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // Direct beam normalization: 2-3
+    // No transmission correction
+    // Processing instructions : 2
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2");
+    alg.setPropertyValue("RegionOfDirectBeam", "2-3");
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->y(0)[0], 0.2911, 0.0001);
+  }
+
+  void test_sum_in_q_monitor_normalization() {
+    // Test IvsLam workspace
+    // Monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // Processing instructions : 2
+    // SummationType : SumInQ
+    // ReductionType : DivergentBeam
+
+    // I0MonitorIndex: 0
+    // MonitorBackgroundWavelengthMin : 0.5
+    // MonitorBackgroundWavelengthMax : 3.0
+    // Normalize by integrated monitors : No
+
+    // Modify counts in monitor (only for this test)
+    // Modify counts only for range that will be fitted
+    auto inputWS = m_multiDetectorWS;
+    auto &Y = m_multiDetectorWS->mutableY(0);
+    std::fill(Y.begin(), Y.begin() + 2, 1.0);
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithmMonitorCorrection(alg, 0.0, 15.0, "2", inputWS, false);
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->x(0)[0], 0.1244, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[7], 5.6420, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[10], 8.0067, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[17], 13.5243, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[0], 7.6861, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 1.4879, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[10], 1.5523, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[17], 1.6371, 0.0001);
+  }
+
+  void test_sum_in_q_transmission_correction_run() {
+    // Transmission run is the same as input run
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "1", m_multiDetectorWS,
+                                         false);
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->y(0)[0], 0.8015, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 0.5722, 0.0001);
+  }
+
+  void test_sum_in_q_exponential_correction() {
+    // CorrectionAlgorithm: ExponentialCorrection
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2");
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    alg.setProperty("CorrectionAlgorithm", "ExponentialCorrection");
+    alg.setProperty("C0", 0.2);
+    alg.setProperty("C1", 0.1);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->y(0)[0], 11.3636, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 17.7963, 0.0001);
+  }
+
+  void test_sum_in_q_IvsQ() {
+    // Test IvsQ workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // Processing instructions : 2
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2");
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    MatrixWorkspace_sptr outQ = runAlgorithmQ(alg, 18);
+
+    // X range in outQ
+    TS_ASSERT_DELTA(outQ->x(0)[0], 0.3327, 0.0001);
+    TS_ASSERT_DELTA(outQ->x(0)[7], 0.5100, 0.0001);
+    // Y counts
+    TS_ASSERT_DELTA(outQ->y(0)[0], 1.9348, 0.0001);
+    TS_ASSERT_DELTA(outQ->y(0)[7], 2.0204, 0.0001);
+  }
+
+private:
+  // Do standard algorithm setup
+  void setupAlgorithm(ReflectometryReductionOne2 &alg,
+                      const double wavelengthMin, const double wavelengthMax,
+                      const std::string &procInstr) {
     alg.setChild(true);
     alg.initialize();
     alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("ProcessingInstructions", "1");
+    alg.setProperty("WavelengthMin", wavelengthMin);
+    alg.setProperty("WavelengthMax", wavelengthMax);
+    alg.setPropertyValue("ProcessingInstructions", procInstr);
     alg.setPropertyValue("OutputWorkspace", "IvsQ");
     alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
+  }
+
+  // Do standard algorithm setup for transmission correction
+  void setupAlgorithmTransmissionCorrection(ReflectometryReductionOne2 &alg,
+                                            const double wavelengthMin,
+                                            const double wavelengthMax,
+                                            const std::string &procInstr,
+                                            MatrixWorkspace_sptr transWS,
+                                            const bool multiple_runs) {
+    setupAlgorithm(alg, wavelengthMin, wavelengthMax, procInstr);
+    alg.setProperty("FirstTransmissionRun", transWS);
+    if (multiple_runs) {
+      alg.setProperty("SecondTransmissionRun", transWS);
+      alg.setProperty("StartOverlap", 2.5);
+      alg.setProperty("EndOverlap", 3.0);
+      alg.setProperty("Params", "0.1");
+    }
+  }
+
+  // Do standard algorithm setup for monitor correction
+  void setupAlgorithmMonitorCorrection(ReflectometryReductionOne2 &alg,
+                                       const double wavelengthMin,
+                                       const double wavelengthMax,
+                                       const std::string &procInstr,
+                                       MatrixWorkspace_sptr inputWS,
+                                       const bool integrate) {
+    setupAlgorithm(alg, wavelengthMin, wavelengthMax, procInstr);
+    alg.setProperty("InputWorkspace", inputWS);
+    alg.setProperty("I0MonitorIndex", "0");
+    alg.setProperty("MonitorBackgroundWavelengthMin", 0.5);
+    alg.setProperty("MonitorBackgroundWavelengthMax", 3.0);
+    if (integrate) {
+      alg.setProperty("NormalizeByIntegratedMonitors", "1");
+      alg.setProperty("MonitorIntegrationWavelengthMin", 1.5);
+      alg.setProperty("MonitorIntegrationWavelengthMax", 15.0);
+    } else {
+      alg.setProperty("NormalizeByIntegratedMonitors", "0");
+    }
+  }
+
+  // Do standard algorithm execution and checks and return IvsLam
+  MatrixWorkspace_sptr runAlgorithmLam(ReflectometryReductionOne2 &alg,
+                                       const size_t blocksize = 14,
+                                       const size_t nHist = 1) {
     alg.execute();
+
+    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    TS_ASSERT(outLam);
+    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), nHist);
+    TS_ASSERT_EQUALS(outLam->blocksize(), blocksize);
+
+    return outLam;
+  }
+
+  // Do standard algorithm execution and checks and return IvsQ
+  MatrixWorkspace_sptr runAlgorithmQ(ReflectometryReductionOne2 &alg,
+                                     const size_t blocksize = 14,
+                                     const size_t nHist = 1) {
+    alg.execute();
+
     MatrixWorkspace_sptr outQ = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(outQ);
+    TS_ASSERT_EQUALS(outQ->getNumberHistograms(), nHist);
+    TS_ASSERT_EQUALS(outQ->blocksize(), blocksize);
 
-    TS_ASSERT_EQUALS(outQ->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outQ->blocksize(), 14);
-    // X range in outQ
-    TS_ASSERT_DELTA(outQ->x(0)[0], 0.3353, 0.0001);
-    TS_ASSERT_DELTA(outQ->x(0)[7], 0.5962, 0.0001);
+    return outQ;
   }
 };
 
diff --git a/Framework/Crystal/CMakeLists.txt b/Framework/Crystal/CMakeLists.txt
index 6b15b1a513e137ea7be9535421f29014551583a4..47d587b5026f0afea3c7690abb30d8b2c47b535f 100644
--- a/Framework/Crystal/CMakeLists.txt
+++ b/Framework/Crystal/CMakeLists.txt
@@ -12,6 +12,7 @@ set ( SRC_FILES
 	src/CombinePeaksWorkspaces.cpp
 	src/CompositeCluster.cpp
 	src/ConnectedComponentLabeling.cpp
+	src/CountReflections.cpp
 	src/DiffPeaksWorkspaces.cpp
 	src/DisjointElement.cpp
 	src/FilterPeaks.cpp
@@ -43,6 +44,7 @@ set ( SRC_FILES
 	src/PeakHKLErrors.cpp
 	src/PeakIntegration.cpp
 	src/PeakIntensityVsRadius.cpp
+	src/PeakStatisticsTools.cpp
 	src/PeaksInRegion.cpp
 	src/PeaksIntersection.cpp
 	src/PeaksOnSurface.cpp
@@ -84,6 +86,7 @@ set ( INC_FILES
 	inc/MantidCrystal/CombinePeaksWorkspaces.h
 	inc/MantidCrystal/CompositeCluster.h
 	inc/MantidCrystal/ConnectedComponentLabeling.h
+	inc/MantidCrystal/CountReflections.h
 	inc/MantidCrystal/DiffPeaksWorkspaces.h
 	inc/MantidCrystal/DisjointElement.h
 	inc/MantidCrystal/FilterPeaks.h
@@ -117,6 +120,7 @@ set ( INC_FILES
 	inc/MantidCrystal/PeakHKLErrors.h
 	inc/MantidCrystal/PeakIntegration.h
 	inc/MantidCrystal/PeakIntensityVsRadius.h
+	inc/MantidCrystal/PeakStatisticsTools.h
 	inc/MantidCrystal/PeaksInRegion.h
 	inc/MantidCrystal/PeaksIntersection.h
 	inc/MantidCrystal/PeaksOnSurface.h
@@ -156,7 +160,7 @@ set ( TEST_FILES
 	CombinePeaksWorkspacesTest.h
 	CompositeClusterTest.h
 	ConnectedComponentLabelingTest.h
-	DiffPeaksWorkspacesTest.h
+        DiffPeaksWorkspacesTest.h
 	DisjointElementTest.h
 	FilterPeaksTest.h
 	FindClusterFacesTest.h
@@ -186,6 +190,7 @@ set ( TEST_FILES
 	PeakHKLErrorsTest.h
 	PeakIntegrationTest.h
 	PeakIntensityVsRadiusTest.h
+	PeakStatisticsToolsTest.h
 	PeaksInRegionTest.h
 	PeaksOnSurfaceTest.h
 	PredictFractionalPeaksTest.h
diff --git a/Framework/Crystal/inc/MantidCrystal/CountReflections.h b/Framework/Crystal/inc/MantidCrystal/CountReflections.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf1a72a7f0a77e00faf7172980ead02471873238
--- /dev/null
+++ b/Framework/Crystal/inc/MantidCrystal/CountReflections.h
@@ -0,0 +1,60 @@
+#ifndef MANTID_CRYSTAL_COUNTREFLECTIONS_H_
+#define MANTID_CRYSTAL_COUNTREFLECTIONS_H_
+
+#include "MantidAPI/Algorithm.h"
+#include "MantidCrystal/PeakStatisticsTools.h"
+#include "MantidDataObjects/PeaksWorkspace.h"
+#include "MantidGeometry/Crystal/PointGroup.h"
+#include "MantidKernel/V3D.h"
+
+namespace Mantid {
+namespace Crystal {
+
+/** CountReflections
+
+  This algorithm takes a PeaksWorkspace and calculates statistics that are
+  based on point group symmetry and do not depend on intensities. For
+  intensity-based statistics, see SortHKL.
+
+  Copyright &copy; 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+class DLLExport CountReflections : public API::Algorithm {
+public:
+  const std::string name() const override;
+  int version() const override;
+  const std::string category() const override;
+  const std::string summary() const override;
+
+private:
+  void init() override;
+  void exec() override;
+
+  API::IPeaksWorkspace_sptr getPeaksWorkspace(
+      const DataObjects::PeaksWorkspace_sptr &templateWorkspace,
+      const PeakStatisticsTools::UniqueReflectionCollection &reflections,
+      const Geometry::PointGroup_sptr &pointGroup) const;
+};
+
+} // namespace Crystal
+} // namespace Mantid
+
+#endif /* MANTID_CRYSTAL_COUNTREFLECTIONS_H_ */
diff --git a/Framework/Crystal/inc/MantidCrystal/FindSXPeaks.h b/Framework/Crystal/inc/MantidCrystal/FindSXPeaks.h
index fd1907f08b2330c90b9ef81c5194bf9e49834aaa..3a874af400b8fb7b55489f0e9b82fbff5acf1a14 100644
--- a/Framework/Crystal/inc/MantidCrystal/FindSXPeaks.h
+++ b/Framework/Crystal/inc/MantidCrystal/FindSXPeaks.h
@@ -9,6 +9,9 @@
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidDataObjects/PeaksWorkspace.h"
 
+#include <unordered_map>
+#include <vector>
+
 namespace Mantid {
 namespace Crystal {
 
@@ -233,6 +236,10 @@ private:
   void init() override;
   //
   void exec() override;
+  // Calculates/returns the average phi value of the detector(s)
+  double calculatePhi(
+      const std::unordered_map<size_t, std::vector<detid_t>> &detectorMapping,
+      const API::SpectrumInfo &spectrumInfo, size_t wsIndex);
   //
   void reducePeakList(const peakvector &);
   /// The value in X to start the search from
@@ -240,9 +247,9 @@ private:
   /// The value in X to finish the search at
   double m_MaxRange;
   /// The spectrum to start the integration from
-  size_t m_MinSpec;
+  size_t m_MinWsIndex;
   /// The spectrum to finish the integration at
-  size_t m_MaxSpec;
+  size_t m_MaxWsIndex;
   // The peaks workspace that contains the peaks information.
   Mantid::DataObjects::PeaksWorkspace_sptr m_peaks;
 };
diff --git a/Framework/Crystal/inc/MantidCrystal/PeakStatisticsTools.h b/Framework/Crystal/inc/MantidCrystal/PeakStatisticsTools.h
new file mode 100644
index 0000000000000000000000000000000000000000..29e46581d4ca976a058354f0bdb0f5446da117a1
--- /dev/null
+++ b/Framework/Crystal/inc/MantidCrystal/PeakStatisticsTools.h
@@ -0,0 +1,169 @@
+#ifndef MANTID_CRYSTAL_PEAKSTATISTICSTOOLS_H_
+#define MANTID_CRYSTAL_PEAKSTATISTICSTOOLS_H_
+
+#include "MantidDataObjects/Peak.h"
+
+#include "MantidGeometry/Crystal/PointGroup.h"
+#include "MantidGeometry/Crystal/ReflectionCondition.h"
+#include "MantidGeometry/Crystal/UnitCell.h"
+
+#include "MantidKernel/V3D.h"
+
+namespace Mantid {
+namespace Crystal {
+namespace PeakStatisticsTools {
+/**
+ * \class UniqueReflection
+ *
+ * This class is a small helper for SortHKL to hold Peak-objects that
+ * belong to the same family of reflections.
+ *
+ * It has methods to return the intensities and sigmas of the contained
+ * Peak-objects as vectors. Furthermore, there is a function that removes
+ * outliers based on the intensities/sigmas.
+ *
+ *
+ */
+class DLLExport UniqueReflection {
+public:
+  UniqueReflection(const Kernel::V3D &hkl) : m_hkl(hkl), m_peaks() {}
+
+  const Kernel::V3D &getHKL() const { return m_hkl; }
+
+  void addPeak(const DataObjects::Peak &peak) { m_peaks.push_back(peak); }
+  const std::vector<DataObjects::Peak> &getPeaks() const { return m_peaks; }
+  size_t count() const { return m_peaks.size(); }
+
+  std::vector<double> getIntensities() const;
+  std::vector<double> getSigmas() const;
+
+  UniqueReflection removeOutliers(double sigmaCritical = 3.0) const;
+  void setPeaksIntensityAndSigma(double intensity, double sigma);
+
+private:
+  Kernel::V3D m_hkl;
+  std::vector<DataObjects::Peak> m_peaks;
+};
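+
+// Illustrative usage sketch (not part of the declared interface; `peakA` and
+// `peakB` are hypothetical Peak objects): equivalent observations are
+// collected into a UniqueReflection and outliers can then be filtered out.
+//
+//   UniqueReflection reflection(Kernel::V3D(1, 1, 1));
+//   reflection.addPeak(peakA); // symmetry-equivalent observations of (111)
+//   reflection.addPeak(peakB);
+//   UniqueReflection filtered = reflection.removeOutliers(3.0);
+//   std::vector<double> intensities = filtered.getIntensities();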
+
+/**
+ * \class UniqueReflectionCollection
+ *
+ * This class computes all possible unique reflections within the
+ * specified d-limits, given a certain unit cell, lattice centering
+ * and point group. The cost of this computation depends directly
+ * on the size of the unit cell (larger cells result in more
+ * reflections) and to some extent also on the symmetry (higher symmetry
+ * results in more matrix operations).
+ *
+ * After adding observations using addObservations, various reflection
+ * counts can be obtained, for example to calculate redundancy or
+ * completeness of the observations.
+ *
+ */
+class DLLExport UniqueReflectionCollection {
+public:
+  UniqueReflectionCollection(
+      const Geometry::UnitCell &cell, const std::pair<double, double> &dLimits,
+      const Geometry::PointGroup_sptr &pointGroup,
+      const Geometry::ReflectionCondition_sptr &centering);
+
+  ~UniqueReflectionCollection() = default;
+
+  void addObservations(const std::vector<DataObjects::Peak> &peaks);
+  UniqueReflection getReflection(const Kernel::V3D &hkl) const;
+
+  size_t getUniqueReflectionCount() const;
+  size_t getObservedUniqueReflectionCount(size_t moreThan = 0) const;
+  std::vector<Kernel::V3D> getUnobservedUniqueReflections() const;
+
+  size_t getObservedReflectionCount() const;
+
+  const std::map<Kernel::V3D, UniqueReflection> &getReflections() const;
+
+protected:
+  /// Alternative constructor for testing purposes, no validation is performed.
+  UniqueReflectionCollection(
+      const std::map<Kernel::V3D, UniqueReflection> &reflections,
+      const Geometry::PointGroup_sptr &pointGroup)
+      : m_reflections(reflections), m_pointgroup(pointGroup) {}
+
+private:
+  std::map<Kernel::V3D, UniqueReflection> m_reflections;
+  Geometry::PointGroup_sptr m_pointgroup;
+};
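+
+// Illustrative usage sketch (assumes `cell`, `pointGroup`, `centering` and
+// `observedPeaks` are valid objects obtained elsewhere):
+//
+//   UniqueReflectionCollection reflections(cell, std::make_pair(1.0, 10.0),
+//                                          pointGroup, centering);
+//   reflections.addObservations(observedPeaks);
+//   double completeness =
+//       static_cast<double>(reflections.getObservedUniqueReflectionCount()) /
+//       static_cast<double>(reflections.getUniqueReflectionCount());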
+
+/**
+ * \class PeaksStatistics
+ *
+ * The PeaksStatistics class is a small helper class that is used
+ * in SortHKL. It takes a UniqueReflectionCollection and calculates
+ * a few data set quality indicators such as Rmerge and Rpim.
+ *
+ * Do not rely on this class to exist forever; parts of it may change
+ * or the entire class may disappear over time.
+ */
+class DLLExport PeaksStatistics {
+public:
+  explicit PeaksStatistics(const UniqueReflectionCollection &reflections)
+      : m_measuredReflections(0), m_uniqueReflections(0), m_completeness(0.0),
+        m_redundancy(0.0), m_rMerge(0.0), m_rPim(0.0), m_meanIOverSigma(0.0),
+        m_dspacingMin(0.0), m_dspacingMax(0.0), m_chiSquared(0.0), m_peaks() {
+    m_peaks.reserve(reflections.getObservedReflectionCount());
+    calculatePeaksStatistics(reflections.getReflections());
+  }
+
+  /// Total number of observed reflections - no symmetry is taken into
+  /// account for this.
+  int m_measuredReflections;
+
+  /// Number of unique reflections. This counts each reflection family once,
+  /// according to the point group.
+  int m_uniqueReflections;
+
+  /// Fraction of observed unique reflections in the resolution range defined
+  /// by d_min and d_max.
+  double m_completeness;
+
+  /// Average number of observations for a unique reflection.
+  double m_redundancy;
+
+  /// Merging R-factor, R_merge, sometimes also called R_sym. This is a basic
+  /// measure for how well the intensities of symmetry equivalent reflections
+  /// agree with each other.
+  double m_rMerge;
+
+  /// Precision-indicating R-factor (R_{p.i.m.}). Also a measure of agreement
+  /// between equivalent reflections, but without some of the weaknesses of
+  /// R_merge.
+  double m_rPim;
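+
+  // For reference, the conventional definitions are (standard crystallographic
+  // formulas, not specific to this implementation):
+  //   R_merge = sum_hkl sum_i |I_i(hkl) - <I(hkl)>| / sum_hkl sum_i I_i(hkl)
+  //   R_pim   = sum_hkl sqrt(1/(n-1)) sum_i |I_i(hkl) - <I(hkl)>|
+  //             / sum_hkl sum_i I_i(hkl)
+  // where <I(hkl)> is the mean of the n equivalent observations of hkl.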
+
+  /// Average signal to noise ratio in the reflections.
+  double m_meanIOverSigma;
+
+  /// Lower d-spacing limit in the data set, sometimes referred to as upper
+  /// resolution limit.
+  double m_dspacingMin;
+
+  /// Upper d-spacing limit in the data set.
+  double m_dspacingMax;
+
+  double m_chiSquared;
+  std::vector<DataObjects::Peak> m_peaks;
+
+private:
+  void calculatePeaksStatistics(
+      const std::map<Kernel::V3D, UniqueReflection> &uniqueReflections);
+
+  double getIOverSigmaSum(const std::vector<double> &sigmas,
+                          const std::vector<double> &intensities) const;
+  double getRMS(const std::vector<double> &data) const;
+
+  std::pair<double, double>
+  getDSpacingLimits(const std::vector<DataObjects::Peak> &peaks) const;
+};
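+
+// Illustrative usage sketch (assumes `reflections` is a populated
+// UniqueReflectionCollection as shown above):
+//
+//   PeaksStatistics statistics(reflections);
+//   double rMerge = statistics.m_rMerge;
+//   double completeness = statistics.m_completeness;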
+
+} // namespace PeakStatisticsTools
+} // namespace Crystal
+} // namespace Mantid
+
+#endif /* MANTID_CRYSTAL_PEAKSTATISTICSTOOLS_H_ */
diff --git a/Framework/Crystal/inc/MantidCrystal/PredictPeaks.h b/Framework/Crystal/inc/MantidCrystal/PredictPeaks.h
index 8ad8a6f555f5e52c2a5b579246a1a4dab057544f..922a6ad9ab78a4b6f2b7365920979482134d4df2 100644
--- a/Framework/Crystal/inc/MantidCrystal/PredictPeaks.h
+++ b/Framework/Crystal/inc/MantidCrystal/PredictPeaks.h
@@ -2,13 +2,17 @@
 #define MANTID_CRYSTAL_PREDICTPEAKS_H_
 
 #include "MantidAPI/Algorithm.h"
+#include "MantidAPI/DetectorSearcher.h"
 #include "MantidDataObjects/PeaksWorkspace.h"
 #include "MantidGeometry/Crystal/ReflectionCondition.h"
 #include "MantidKernel/System.h"
+#include "MantidKernel/NearestNeighbours.h"
 #include <MantidGeometry/Crystal/OrientedLattice.h>
 #include <MantidGeometry/Crystal/StructureFactorCalculator.h>
 #include "MantidKernel/Matrix.h"
 
+#include <tuple>
+
 namespace Mantid {
 namespace Crystal {
 
@@ -63,6 +67,11 @@ private:
                                 const Kernel::DblMatrix &goniometerMatrix);
 
 private:
+  /// Get the predicted detector direction from Q
+  std::tuple<Kernel::V3D, double>
+  getPeakParametersFromQ(const Kernel::V3D &q) const;
+  /// Cache the reference frame and beam direction from the instrument
+  void setReferenceFrameAndBeamDirection();
   void logNumberOfPeaksFound(size_t allowedPeakCount) const;
 
   /// Number of edge pixels with no peaks
@@ -70,11 +79,16 @@ private:
 
   /// Reflection conditions possible
   std::vector<Mantid::Geometry::ReflectionCondition_sptr> m_refConds;
-
+  /// Detector search cache for fast look-up of detectors
+  std::unique_ptr<API::DetectorSearcher> m_detectorCacheSearch;
   /// Run number of input workspace
   int m_runNumber;
   /// Instrument reference
   Geometry::Instrument_const_sptr m_inst;
+  /// Reference frame for the instrument
+  boost::shared_ptr<const Geometry::ReferenceFrame> m_refFrame;
+  /// Direction of the beam for this instrument
+  Kernel::V3D m_refBeamDir;
   /// Output peaks workspace
   Mantid::DataObjects::PeaksWorkspace_sptr m_pw;
   Geometry::StructureFactorCalculator_sptr m_sfCalculator;
diff --git a/Framework/Crystal/inc/MantidCrystal/SortHKL.h b/Framework/Crystal/inc/MantidCrystal/SortHKL.h
index 4f39388cd25228d174b690ea04597dfb1feb954f..f2a72e51f2a684dfc686d3f140d3be196c3d979d 100644
--- a/Framework/Crystal/inc/MantidCrystal/SortHKL.h
+++ b/Framework/Crystal/inc/MantidCrystal/SortHKL.h
@@ -7,6 +7,8 @@
 #include "MantidAPI/IPeaksWorkspace_fwd.h"
 #include "MantidAPI/ITableWorkspace_fwd.h"
 
+#include "MantidCrystal/PeakStatisticsTools.h"
+
 #include "MantidDataObjects/Peak.h"
 #include "MantidDataObjects/PeaksWorkspace.h"
 
@@ -19,88 +21,6 @@
 namespace Mantid {
 namespace Crystal {
 
-/**
- * \class UniqueReflection
- *
- * This class is a small helper for SortHKL to hold Peak-objects that
- * belong to the same family of reflections.
- *
- * It has methods to return the intensities and sigmas of the contained
- * Peak-objects as vectors. Furthermore there is a function that removes
- * outliers based on the intensities/sigmas.
- *
- *
- */
-class DLLExport UniqueReflection {
-public:
-  UniqueReflection(const Kernel::V3D &hkl) : m_hkl(hkl), m_peaks() {}
-
-  const Kernel::V3D &getHKL() const { return m_hkl; }
-
-  void addPeak(const DataObjects::Peak &peak) { m_peaks.push_back(peak); }
-  const std::vector<DataObjects::Peak> &getPeaks() const { return m_peaks; }
-  size_t count() const { return m_peaks.size(); }
-
-  std::vector<double> getIntensities() const;
-  std::vector<double> getSigmas() const;
-
-  void removeOutliers(double sigmaCritical = 3.0);
-  void setPeaksIntensityAndSigma(double intensity, double sigma);
-
-private:
-  Kernel::V3D m_hkl;
-  std::vector<DataObjects::Peak> m_peaks;
-};
-
-/**
- * \class PeaksStatistics
- *
- * The PeaksStatistics class is a small helper class for SortHKL.
- *
- * During construction, a number of statistical indicators is calculated,
- * using the map passed to the constructor.
- *
- * Please note that the map is modified during the calculation and becomes
- * essentially unusable after that, but that is not a problem since the map
- * is currently not meant to be stored anywhere. This class may eventually
- * disappear and might end up being re-implemented in a more general scope.
- */
-class DLLExport PeaksStatistics {
-public:
-  PeaksStatistics(std::map<Kernel::V3D, UniqueReflection> &uniqueReflections,
-                  size_t totalReflectionCount)
-      : m_measuredReflections(0), m_uniqueReflections(0), m_completeness(0.0),
-        m_redundancy(0.0), m_rMerge(0.0), m_rPim(0.0), m_meanIOverSigma(0.0),
-        m_dspacingMin(0.0), m_dspacingMax(0.0), m_chiSquared(0.0), m_peaks() {
-    m_peaks.reserve(totalReflectionCount);
-    calculatePeaksStatistics(uniqueReflections);
-  }
-
-  int m_measuredReflections;
-  int m_uniqueReflections;
-  double m_completeness;
-  double m_redundancy;
-  double m_rMerge;
-  double m_rPim;
-  double m_meanIOverSigma;
-  double m_dspacingMin;
-  double m_dspacingMax;
-  double m_chiSquared;
-
-  std::vector<DataObjects::Peak> m_peaks;
-
-private:
-  void calculatePeaksStatistics(
-      std::map<Kernel::V3D, UniqueReflection> &uniqueReflections);
-
-  double getIOverSigmaSum(const std::vector<double> &sigmas,
-                          const std::vector<double> &intensities) const;
-  double getRMS(const std::vector<double> &data) const;
-
-  std::pair<double, double>
-  getDSpacingLimits(const std::vector<DataObjects::Peak> &peaks) const;
-};
-
 /** Save a PeaksWorkspace to a Gsas-style ASCII .hkl file.
  *
  * @author Vickie Lynch, SNS
@@ -133,7 +53,7 @@ private:
   std::vector<DataObjects::Peak>
   getNonZeroPeaks(const std::vector<DataObjects::Peak> &inputPeaks) const;
 
-  std::map<Kernel::V3D, UniqueReflection>
+  PeakStatisticsTools::UniqueReflectionCollection
   getUniqueReflections(const std::vector<DataObjects::Peak> &peaks,
                        const Geometry::UnitCell &cell) const;
 
@@ -144,14 +64,10 @@ private:
   getDLimits(const std::vector<DataObjects::Peak> &peaks,
              const Geometry::UnitCell &cell) const;
 
-  std::map<Kernel::V3D, UniqueReflection> getPossibleUniqueReflections(
-      const Geometry::UnitCell &cell, const std::pair<double, double> &dLimits,
-      const Geometry::PointGroup_sptr &pointGroup,
-      const Geometry::ReflectionCondition_sptr &centering) const;
-
   API::ITableWorkspace_sptr getStatisticsTable(const std::string &name) const;
-  void insertStatisticsIntoTable(const API::ITableWorkspace_sptr &table,
-                                 const PeaksStatistics &statistics) const;
+  void insertStatisticsIntoTable(
+      const API::ITableWorkspace_sptr &table,
+      const PeakStatisticsTools::PeaksStatistics &statistics) const;
 
   DataObjects::PeaksWorkspace_sptr getOutputPeaksWorkspace(
       const DataObjects::PeaksWorkspace_sptr &inputPeaksWorkspace) const;
diff --git a/Framework/Crystal/src/CountReflections.cpp b/Framework/Crystal/src/CountReflections.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a65b6e4369e3e8392d5c5fbcba389d1f345961ac
--- /dev/null
+++ b/Framework/Crystal/src/CountReflections.cpp
@@ -0,0 +1,217 @@
+#include "MantidCrystal/CountReflections.h"
+#include "MantidCrystal/PeakStatisticsTools.h"
+
+#include "MantidAPI/Sample.h"
+#include "MantidAPI/WorkspaceProperty.h"
+
+#include "MantidDataObjects/PeaksWorkspace.h"
+
+#include "MantidGeometry/Crystal/PointGroupFactory.h"
+#include "MantidGeometry/Crystal/ReflectionCondition.h"
+#include "MantidGeometry/Crystal/OrientedLattice.h"
+
+#include "MantidKernel/make_unique.h"
+#include "MantidKernel/ListValidator.h"
+
+namespace Mantid {
+namespace Crystal {
+
+using Mantid::Kernel::Direction;
+
+using namespace Mantid::API;
+using namespace Mantid::DataObjects;
+using namespace Mantid::Geometry;
+using namespace Mantid::Kernel;
+
+// Register the algorithm into the AlgorithmFactory
+DECLARE_ALGORITHM(CountReflections)
+
+//----------------------------------------------------------------------------------------------
+
+/// Algorithms name for identification. @see Algorithm::name
+const std::string CountReflections::name() const { return "CountReflections"; }
+
+/// Algorithm's version for identification. @see Algorithm::version
+int CountReflections::version() const { return 1; }
+
+/// Algorithm's category for identification. @see Algorithm::category
+const std::string CountReflections::category() const {
+  return "Crystal\\Peaks";
+}
+
+/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary
+const std::string CountReflections::summary() const {
+  return "Calculates statistics for a PeaksWorkspace based on symmetry and "
+         "counting reflections.";
+}
+
+//----------------------------------------------------------------------------------------------
+/** Initialize the algorithm's properties.
+ */
+void CountReflections::init() {
+  declareProperty(Kernel::make_unique<WorkspaceProperty<PeaksWorkspace>>(
+                      "InputWorkspace", "", Direction::Input),
+                  "A workspace with peaks to calculate statistics for. A "
+                  "sample with a valid UB-matrix is required.");
+
+  auto centeringSymbols = getAllReflectionConditionSymbols();
+  declareProperty("LatticeCentering", centeringSymbols[0],
+                  boost::make_shared<StringListValidator>(centeringSymbols),
+                  "Lattice centering of the cell.");
+
+  auto pointGroups = PointGroupFactory::Instance().getAllPointGroupSymbols();
+  declareProperty(
+      "PointGroup", "1", boost::make_shared<StringListValidator>(pointGroups),
+      "Point group symmetry for completeness and redundancy calculations.");
+
+  declareProperty(Kernel::make_unique<PropertyWithValue<double>>(
+                      "MinDSpacing", 1.0, Direction::Input),
+                  "Minimum d-spacing for completeness calculation.");
+
+  declareProperty(Kernel::make_unique<PropertyWithValue<double>>(
+                      "MaxDSpacing", 100.0, Direction::Input),
+                  "Maximum d-spacing for completeness calculation.");
+
+  declareProperty(Kernel::make_unique<PropertyWithValue<int>>(
+                      "UniqueReflections", 0, Direction::Output),
+                  "Number of unique reflections in data set.");
+
+  declareProperty(
+      Kernel::make_unique<PropertyWithValue<double>>("Completeness", 0.0,
+                                                     Direction::Output),
+      "Completeness of the data set as a fraction between 0 and 1.");
+
+  declareProperty(Kernel::make_unique<PropertyWithValue<double>>(
+                      "Redundancy", 0.0, Direction::Output),
+                  "Average redundancy in data set, depending on point group.");
+
+  declareProperty(Kernel::make_unique<PropertyWithValue<double>>(
+                      "MultiplyObserved", 0.0, Direction::Output),
+                  "Fraction of reflections with more than one observation.");
+
+  declareProperty(
+      Kernel::make_unique<WorkspaceProperty<IPeaksWorkspace>>(
+          "MissingReflectionsWorkspace", "", Direction::Output,
+          PropertyMode::Optional),
+      "Reflections in the specified d-range that are missing in the input "
+      "workspace.");
+}
+
+//----------------------------------------------------------------------------------------------
+/** Execute the algorithm.
+ */
+void CountReflections::exec() {
+  double dMin = getProperty("MinDSpacing");
+  double dMax = getProperty("MaxDSpacing");
+
+  PointGroup_sptr pointGroup =
+      PointGroupFactory::Instance().createPointGroup(getProperty("PointGroup"));
+
+  ReflectionCondition_sptr centering =
+      getReflectionConditionBySymbol(getProperty("LatticeCentering"));
+
+  PeaksWorkspace_sptr inputPeaksWorkspace = getProperty("InputWorkspace");
+
+  UnitCell cell = inputPeaksWorkspace->sample().getOrientedLattice();
+
+  PeakStatisticsTools::UniqueReflectionCollection reflections(
+      cell, std::make_pair(dMin, dMax), pointGroup, centering);
+
+  auto peaks = inputPeaksWorkspace->getPeaks();
+  reflections.addObservations(peaks);
+
+  double possibleUniqueReflections =
+      static_cast<double>(reflections.getUniqueReflectionCount());
+
+  size_t observedUniqueReflections =
+      reflections.getObservedUniqueReflectionCount();
+
+  double observedUniqueReflectionsD =
+      static_cast<double>(observedUniqueReflections);
+
+  size_t totalReflections = reflections.getObservedReflectionCount();
+
+  if (peaks.size() > totalReflections) {
+    g_log.information() << "There are " << (peaks.size() - totalReflections)
+                        << " peaks in the input workspace that fall outside "
+                           "the resolution limit and are not considered for "
+                           "the calculations." << std::endl;
+  }
+
+  double multiplyObservedReflections =
+      static_cast<double>(reflections.getObservedUniqueReflectionCount(1));
+
+  setProperty("UniqueReflections", static_cast<int>(observedUniqueReflections));
+  setProperty("Completeness",
+              observedUniqueReflectionsD / possibleUniqueReflections);
+  setProperty("Redundancy", static_cast<double>(totalReflections) /
+                                observedUniqueReflectionsD);
+  setProperty("MultiplyObserved",
+              multiplyObservedReflections / observedUniqueReflectionsD);
+
+  IPeaksWorkspace_sptr outputWorkspace =
+      getPeaksWorkspace(inputPeaksWorkspace, reflections, pointGroup);
+
+  if (outputWorkspace) {
+    setProperty("MissingReflectionsWorkspace", outputWorkspace);
+  }
+}
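+
+// Worked example with made-up numbers (illustration only, not from a real data
+// set): if the d-range allows 100 unique reflections, 80 of them are observed
+// at least once, and those 80 account for 200 observed peaks in total, then
+// exec() reports UniqueReflections = 80, Completeness = 80/100 = 0.8,
+// Redundancy = 200/80 = 2.5, and MultiplyObserved as the fraction of the 80
+// that were observed more than once.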
+
+/**
+ * @brief CountReflections::getPeaksWorkspace
+ *
+ * This method expands the missing unique reflections to all reflections,
+ * so that for example (001) would yield (001) and (00-1) for point group -1.
+ *
+ * Then these reflections are translated into peaks and put into the output
+ * workspace. This method could at some point probably move closer to (or into)
+ * UniqueReflectionCollection.
+ *
+ * @param templateWorkspace :: Input workspace to clone if necessary.
+ * @param reflections :: Collection of unique reflections.
+ * @param pointGroup :: Point group to expand unique reflections.
+ * @return :: PeaksWorkspace with missing reflections.
+ */
+IPeaksWorkspace_sptr CountReflections::getPeaksWorkspace(
+    const PeaksWorkspace_sptr &templateWorkspace,
+    const PeakStatisticsTools::UniqueReflectionCollection &reflections,
+    const PointGroup_sptr &pointGroup) const {
+  std::string outputWorkspaceName =
+      getPropertyValue("MissingReflectionsWorkspace");
+
+  if (outputWorkspaceName.empty()) {
+    return IPeaksWorkspace_sptr();
+  }
+
+  IPeaksWorkspace_sptr rawOutputPeaksWorkspace =
+      getProperty("MissingReflectionsWorkspace");
+
+  PeaksWorkspace_sptr outputPeaksWorkspace =
+      boost::dynamic_pointer_cast<PeaksWorkspace>(rawOutputPeaksWorkspace);
+
+  if (outputPeaksWorkspace != templateWorkspace) {
+    outputPeaksWorkspace = templateWorkspace->clone();
+  }
+
+  const auto &missingPeaks = reflections.getUnobservedUniqueReflections();
+
+  std::vector<Peak> peaks;
+  peaks.reserve(missingPeaks.size() * pointGroup->order());
+
+  for (const auto &reflection : missingPeaks) {
+    auto hkls = pointGroup->getEquivalents(reflection);
+
+    for (const auto &hkl : hkls) {
+      Peak peak;
+      peak.setHKL(hkl);
+
+      peaks.emplace_back(peak);
+    }
+  }
+
+  outputPeaksWorkspace->getPeaks().swap(peaks);
+
+  return boost::static_pointer_cast<IPeaksWorkspace>(outputPeaksWorkspace);
+}
+
+} // namespace Crystal
+} // namespace Mantid
diff --git a/Framework/Crystal/src/FindSXPeaks.cpp b/Framework/Crystal/src/FindSXPeaks.cpp
index 4178d8db8c8fa9fe78bf8c4eae9adcc66acd7abc..3b3e6573f716485537d342bbcfff2a04ea89547a 100644
--- a/Framework/Crystal/src/FindSXPeaks.cpp
+++ b/Framework/Crystal/src/FindSXPeaks.cpp
@@ -3,11 +3,51 @@
 //----------------------------------------------------------------------
 #include "MantidCrystal/FindSXPeaks.h"
 #include "MantidAPI/HistogramValidator.h"
-#include "MantidKernel/VectorHelper.h"
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidAPI/WorkspaceUnitValidator.h"
+#include "MantidGeometry/Instrument/DetectorGroup.h"
+#include "MantidIndexing/IndexInfo.h"
 #include "MantidKernel/BoundedValidator.h"
+#include "MantidKernel/CompositeValidator.h"
+
+#include <unordered_map>
+#include <vector>
 
 using namespace Mantid::DataObjects;
 
+namespace {
+// Anonymous namespace
+using namespace Mantid;
+using WsIndexToDetIds = std::unordered_map<size_t, std::vector<detid_t>>;
+
+WsIndexToDetIds mapDetectorsToWsIndexes(const API::DetectorInfo &detectorInfo,
+                                        const detid2index_map &mapping) {
+  const auto &detectorIds = detectorInfo.detectorIDs();
+  WsIndexToDetIds indexToDetMapping;
+
+  indexToDetMapping.reserve(detectorIds.size());
+  for (const auto detectorID : detectorIds) {
+    auto detMapEntry = mapping.find(detectorID);
+    if (detMapEntry == mapping.end()) {
+      throw std::runtime_error(
+          "Detector ID " + std::to_string(detectorID) +
+          " was not found in the workspace index mapping.");
+    }
+
+    const size_t wsIndex = detMapEntry->second;
+    auto indexMapEntry = indexToDetMapping.find(wsIndex);
+    if (indexMapEntry == indexToDetMapping.end()) {
+      // Create a new vector if one does not exist
+      indexToDetMapping[wsIndex] = std::vector<detid_t>{detectorID};
+    } else {
+      // Otherwise add the detector ID to the current list
+      indexToDetMapping[wsIndex].push_back(detectorID);
+    }
+  }
+  return indexToDetMapping;
+}
+}
+
 namespace Mantid {
 namespace Crystal {
 // Register the class into the algorithm factory
@@ -16,17 +56,23 @@ DECLARE_ALGORITHM(FindSXPeaks)
 using namespace Kernel;
 using namespace API;
 
+// Typedef for the workspace index to detector ID mapping
+using WsIndexToDetIds = std::unordered_map<size_t, std::vector<detid_t>>;
+
 FindSXPeaks::FindSXPeaks()
-    : API::Algorithm(), m_MinRange(DBL_MAX), m_MaxRange(-DBL_MAX), m_MinSpec(0),
-      m_MaxSpec(0) {}
+    : API::Algorithm(), m_MinRange(DBL_MAX), m_MaxRange(-DBL_MAX),
+      m_MinWsIndex(0), m_MaxWsIndex(0) {}
 
 /** Initialisation method.
  *
  */
 void FindSXPeaks::init() {
+  auto wsValidation = boost::make_shared<CompositeValidator>();
+  wsValidation->add<HistogramValidator>();
+  wsValidation->add<WorkspaceUnitValidator>("TOF");
+
   declareProperty(make_unique<WorkspaceProperty<>>(
-                      "InputWorkspace", "", Direction::Input,
-                      boost::make_shared<HistogramValidator>()),
+                      "InputWorkspace", "", Direction::Input, wsValidation),
                   "The name of the Workspace2D to take as input");
   declareProperty("RangeLower", EMPTY_DBL(),
                   "The X value to search from (default 0)");
@@ -62,33 +108,33 @@ void FindSXPeaks::exec() {
   m_MaxRange = getProperty("RangeUpper");
 
   // the assignment below is intended and if removed will break the unit tests
-  m_MinSpec = static_cast<int>(getProperty("StartWorkspaceIndex"));
-  m_MaxSpec = static_cast<int>(getProperty("EndWorkspaceIndex"));
+  m_MinWsIndex = static_cast<int>(getProperty("StartWorkspaceIndex"));
+  m_MaxWsIndex = static_cast<int>(getProperty("EndWorkspaceIndex"));
   double SB = getProperty("SignalBackground");
 
   // Get the input workspace
   MatrixWorkspace_const_sptr localworkspace = getProperty("InputWorkspace");
 
-  // copy the instrument accross. Cannot generate peaks without doing this
+  // copy the instrument across. Cannot generate peaks without doing this
   // first.
   m_peaks->setInstrument(localworkspace->getInstrument());
 
   size_t numberOfSpectra = localworkspace->getNumberHistograms();
 
   // Check 'StartSpectrum' is in range 0-numberOfSpectra
-  if (m_MinSpec > numberOfSpectra) {
+  if (m_MinWsIndex > numberOfSpectra) {
     g_log.warning("StartSpectrum out of range! Set to 0.");
-    m_MinSpec = 0;
+    m_MinWsIndex = 0;
   }
-  if (m_MinSpec > m_MaxSpec) {
+  if (m_MinWsIndex > m_MaxWsIndex) {
     throw std::invalid_argument(
         "Cannot have StartWorkspaceIndex > EndWorkspaceIndex");
   }
-  if (isEmpty(m_MaxSpec))
-    m_MaxSpec = numberOfSpectra - 1;
-  if (m_MaxSpec > numberOfSpectra - 1 || m_MaxSpec < m_MinSpec) {
+  if (isEmpty(m_MaxWsIndex))
+    m_MaxWsIndex = numberOfSpectra - 1;
+  if (m_MaxWsIndex > numberOfSpectra - 1 || m_MaxWsIndex < m_MinWsIndex) {
     g_log.warning("EndSpectrum out of range! Set to max detector number");
-    m_MaxSpec = numberOfSpectra;
+    m_MaxWsIndex = numberOfSpectra;
   }
   if (m_MinRange > m_MaxRange) {
     g_log.warning("Range_upper is less than Range_lower. Will integrate up to "
@@ -96,23 +142,33 @@ void FindSXPeaks::exec() {
     m_MaxRange = 0.0;
   }
 
-  Progress progress(this, 0, 1, (m_MaxSpec - m_MinSpec + 1));
+  Progress progress(this, 0, 1, (m_MaxWsIndex - m_MinWsIndex + 1));
 
   // Calculate the primary flight path.
   const auto &spectrumInfo = localworkspace->spectrumInfo();
+  const auto &detectorInfo = localworkspace->detectorInfo();
+
+  const WsIndexToDetIds wsIndexToDetIdMap = mapDetectorsToWsIndexes(
+      detectorInfo, localworkspace->getDetectorIDToWorkspaceIndexMap());
 
   peakvector entries;
-  // Reserve 1000 peaks to make later push_back fast for first 1000 peaks, but
-  // unlikely to have more than this.
-  entries.reserve(1000);
-  // Count the peaks so that we can resize the peakvector at the end.
+  entries.reserve(m_MaxWsIndex - m_MinWsIndex);
+  // Count the peaks so that we can resize the peak vector at the end.
   PARALLEL_FOR_IF(Kernel::threadSafe(*localworkspace))
-  for (int i = static_cast<int>(m_MinSpec); i <= static_cast<int>(m_MaxSpec);
-       ++i) {
+  for (int wsIndex = static_cast<int>(m_MinWsIndex);
+       wsIndex <= static_cast<int>(m_MaxWsIndex); ++wsIndex) {
     PARALLEL_START_INTERUPT_REGION
+
+    // If no detector is found, or this is a monitor, skip to the next spectrum
+    const size_t wsIndexSize_t = static_cast<size_t>(wsIndex);
+    if (!spectrumInfo.hasDetectors(wsIndexSize_t) ||
+        spectrumInfo.isMonitor(wsIndexSize_t)) {
+      continue;
+    }
+
     // Retrieve the spectrum into a vector
-    const auto &X = localworkspace->x(i);
-    const auto &Y = localworkspace->y(i);
+    const auto &X = localworkspace->x(wsIndex);
+    const auto &Y = localworkspace->y(wsIndex);
 
     // Find the range [min,max]
     auto lowit = (m_MinRange == EMPTY_DBL())
@@ -152,26 +208,11 @@ void FindSXPeaks::exec() {
     double rightBinEdge = *std::next(leftBinPosition);
     double tof = 0.5 * (leftBinEdge + rightBinEdge);
 
-    // If no detector found, skip onto the next spectrum
-    if (!spectrumInfo.hasDetectors(static_cast<size_t>(i))) {
-      continue;
-    }
-    if (!spectrumInfo.hasUniqueDetector(i)) {
-      std::ostringstream sout;
-      sout << "Spectrum at workspace index " << i
-           << " has unsupported number of detectors.";
-      throw std::runtime_error(sout.str());
-    }
-    const auto &det = spectrumInfo.detector(static_cast<size_t>(i));
-
-    double phi = det.getPhi();
-    if (phi < 0) {
-      phi += 2.0 * M_PI;
-    }
-
-    std::vector<int> specs(1, i);
+    const double phi =
+        calculatePhi(wsIndexToDetIdMap, spectrumInfo, wsIndexSize_t);
 
-    SXPeak peak(tof, phi, *maxY, specs, i, spectrumInfo);
+    std::vector<int> specs(1, wsIndex);
+    SXPeak peak(tof, phi, *maxY, specs, wsIndex, spectrumInfo);
     PARALLEL_CRITICAL(entries) { entries.push_back(peak); }
     progress.report();
     PARALLEL_END_INTERUPT_REGION
@@ -185,6 +226,38 @@ void FindSXPeaks::exec() {
   progress.report();
 }
 
+/**
+  * Calculates the average phi value if the workspace contains
+  * multiple detectors per spectrum, or returns the phi value of the
+  * single detector when the mapping is one detector per spectrum.
+  * @param detectorMapping :: The mapping of workspace index to detector id(s)
+  * @param spectrumInfo :: The spectrum info of this workspace
+  * @param wsIndex :: The index to return the phi value of
+  * @return :: The averaged or exact value of phi
+  */
+double FindSXPeaks::calculatePhi(const WsIndexToDetIds &detectorMapping,
+                                 const SpectrumInfo &spectrumInfo,
+                                 size_t wsIndex) {
+  double phi = std::numeric_limits<double>::infinity();
+  const size_t numDetectors = detectorMapping.at(wsIndex).size();
+  const auto &det = spectrumInfo.detector(wsIndex);
+  if (numDetectors == 1) {
+    phi = det.getPhi();
+  } else {
+    // Have to average the value for phi
+    auto detectorGroup = dynamic_cast<const Geometry::DetectorGroup *>(&det);
+    if (!detectorGroup) {
+      throw std::runtime_error("Could not cast to detector group");
+    }
+    phi = detectorGroup->getPhi();
+  }
+
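+  // Wrap negative phi values into the [0, 2*pi) range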
+  if (phi < 0) {
+    phi += 2.0 * M_PI;
+  }
+  return phi;
+}
+
 /**
 Reduce the peak list by removing duplicates
 then convert SXPeaks objects to PeakObjects and add them to the output workspace
diff --git a/Framework/Crystal/src/PeakStatisticsTools.cpp b/Framework/Crystal/src/PeakStatisticsTools.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..de0030952fa6d183812b3ee854bf18f9eddefe6d
--- /dev/null
+++ b/Framework/Crystal/src/PeakStatisticsTools.cpp
@@ -0,0 +1,330 @@
+#include "MantidCrystal/PeakStatisticsTools.h"
+
+#include "MantidGeometry/Crystal/BasicHKLFilters.h"
+#include "MantidGeometry/Crystal/HKLGenerator.h"
+
+#include "MantidKernel/Statistics.h"
+
+#include <boost/make_shared.hpp>
+#include <numeric>
+
+namespace Mantid {
+namespace Crystal {
+namespace PeakStatisticsTools {
+
+using namespace Mantid::DataObjects;
+using namespace Mantid::Geometry;
+using namespace Mantid::Kernel;
+
+/// Returns a vector with the intensities of the Peaks stored in this
+/// reflection.
+std::vector<double> UniqueReflection::getIntensities() const {
+  std::vector<double> intensities;
+  intensities.reserve(m_peaks.size());
+
+  std::transform(
+      m_peaks.begin(), m_peaks.end(), std::back_inserter(intensities),
+      [](const DataObjects::Peak &peak) { return peak.getIntensity(); });
+
+  return intensities;
+}
+
+/// Returns a vector with the intensity sigmas of the Peaks stored in this
+/// reflection.
+std::vector<double> UniqueReflection::getSigmas() const {
+  std::vector<double> sigmas;
+  sigmas.reserve(m_peaks.size());
+
+  std::transform(
+      m_peaks.begin(), m_peaks.end(), std::back_inserter(sigmas),
+      [](const DataObjects::Peak &peak) { return peak.getSigmaIntensity(); });
+
+  return sigmas;
+}
+
+/// Removes peaks whose intensity deviates more than sigmaCritical from the
+/// intensities' mean.
+UniqueReflection UniqueReflection::removeOutliers(double sigmaCritical) const {
+  if (sigmaCritical <= 0.0) {
+    throw std::invalid_argument(
+        "Critical sigma value has to be greater than 0.");
+  }
+
+  UniqueReflection newReflection(m_hkl);
+
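+  // Z-score based outlier rejection is only meaningful for more than two
+  // observations; with fewer, all peaks are kept unchanged.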
+  if (m_peaks.size() > 2) {
+    auto intensities = getIntensities();
+    auto zScores = Kernel::getZscore(intensities);
+
+    for (size_t i = 0; i < zScores.size(); ++i) {
+      if (zScores[i] <= sigmaCritical) {
+        newReflection.addPeak(m_peaks[i]);
+      }
+    }
+  } else {
+    for (auto peak : m_peaks) {
+      newReflection.addPeak(peak);
+    }
+  }
+
+  return newReflection;
+}
+
+/// Sets the intensities and sigmas of all stored peaks to the supplied values.
+void UniqueReflection::setPeaksIntensityAndSigma(double intensity,
+                                                 double sigma) {
+  for (auto &peak : m_peaks) {
+    peak.setIntensity(intensity);
+    peak.setSigmaIntensity(sigma);
+  }
+}
+
+/**
+ * @brief UniqueReflectionCollection::UniqueReflectionCollection
+ *
+ * Takes the supplied parameters to calculate theoretically possible
+ * unique reflections and stores a UniqueReflection for each of those
+ * internally.
+ *
+ * @param cell :: UnitCell of the sample.
+ * @param dLimits :: Resolution limits for the generated reflections.
+ * @param pointGroup :: Point group of the sample.
+ * @param centering :: Lattice centering.
+ */
+UniqueReflectionCollection::UniqueReflectionCollection(
+    const UnitCell &cell, const std::pair<double, double> &dLimits,
+    const PointGroup_sptr &pointGroup,
+    const ReflectionCondition_sptr &centering)
+    : m_reflections(), m_pointgroup(pointGroup) {
+  HKLGenerator generator(cell, dLimits.first);
+  auto dFilter = boost::make_shared<const HKLFilterDRange>(cell, dLimits.first,
+                                                           dLimits.second);
+  auto centeringFilter =
+      boost::make_shared<const HKLFilterCentering>(centering);
+  auto filter = dFilter & centeringFilter;
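+  // Keep only reflections that lie inside the d-limits and are allowed by the
+  // lattice centering.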
+
+  // Generate map of UniqueReflection-objects with reflection family as key.
+  for (const auto &hkl : generator) {
+    if (filter->isAllowed(hkl)) {
+      V3D hklFamily = m_pointgroup->getReflectionFamily(hkl);
+      m_reflections.emplace(hklFamily, UniqueReflection(hklFamily));
+    }
+  }
+}
+
+/// Assigns the supplied peaks to the proper UniqueReflection. Peaks for which
+/// the reflection family can not be found are ignored.
+void UniqueReflectionCollection::addObservations(
+    const std::vector<Peak> &peaks) {
+  for (auto const &peak : peaks) {
+    V3D hkl = peak.getHKL();
+    hkl.round();
+
+    auto reflection =
+        m_reflections.find(m_pointgroup->getReflectionFamily(hkl));
+
+    if (reflection != m_reflections.end()) {
+      (*reflection).second.addPeak(peak);
+    }
+  }
+}
+
+/// Returns a copy of the UniqueReflection with the supplied HKL. Raises an
+/// exception if the reflection is not found.
+UniqueReflection
+UniqueReflectionCollection::getReflection(const V3D &hkl) const {
+  return m_reflections.at(m_pointgroup->getReflectionFamily(hkl));
+}
+
+/// Total number of unique reflections (theoretically possible).
+size_t UniqueReflectionCollection::getUniqueReflectionCount() const {
+  return m_reflections.size();
+}
+
+/// Number of unique reflections that have more observations than the supplied
+/// number (the default of 0 gives the count of unique reflections with at
+/// least one observation).
+size_t UniqueReflectionCollection::getObservedUniqueReflectionCount(
+    size_t moreThan) const {
+  return std::count_if(
+      m_reflections.cbegin(), m_reflections.cend(),
+      [=](const std::pair<Kernel::V3D, UniqueReflection> &item) {
+        return item.second.count() > moreThan;
+      });
+}
+
+/// List of unobserved unique reflections in resolution range.
+std::vector<V3D>
+UniqueReflectionCollection::getUnobservedUniqueReflections() const {
+  std::vector<V3D> reflections;
+  reflections.reserve(m_reflections.size());
+
+  for (const auto &reflection : m_reflections) {
+    if (reflection.second.count() == 0) {
+      reflections.push_back(reflection.first);
+    }
+  }
+
+  return reflections;
+}
+
+/// Number of observed reflections.
+size_t UniqueReflectionCollection::getObservedReflectionCount() const {
+  return std::accumulate(
+      m_reflections.cbegin(), m_reflections.cend(), size_t(0),
+      [](size_t totalReflections,
+         const std::pair<Kernel::V3D, UniqueReflection> &item) {
+        return totalReflections + item.second.count();
+      });
+}
+
+/// Returns the internally stored reflection map. May disappear or change if
+/// implementation changes.
+const std::map<V3D, UniqueReflection> &
+UniqueReflectionCollection::getReflections() const {
+  return m_reflections;
+}
+
+/**
+ * @brief PeaksStatistics::calculatePeaksStatistics
+ *
+ * This function iterates through the unique reflections map and computes
+ * statistics for the reflections/peaks. It calls
+ * UniqueReflection::removeOutliers, so outliers are removed before the
+ * statistical quantities are calculated.
+ *
+ * Furthermore it sets the intensities of each peak to the mean of the
+ * group of equivalent reflections.
+ *
+ * @param uniqueReflections :: Map of unique reflections and peaks.
+ */
+void PeaksStatistics::calculatePeaksStatistics(
+    const std::map<V3D, UniqueReflection> &uniqueReflections) {
+  double rMergeNumerator = 0.0;
+  double rPimNumerator = 0.0;
+  double intensitySumRValues = 0.0;
+  double iOverSigmaSum = 0.0;
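+  /* The accumulators above build up the merging statistics
+   *   Rmerge = sum_hkl sum_i |I_i - <I>| / sum_hkl sum_i I_i
+   *   Rpim   = sum_hkl sqrt(1/(N-1)) * sum_i |I_i - <I>| / sum_hkl sum_i I_i
+   * where N is the number of observations of each unique reflection. */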
+
+  for (const auto &unique : uniqueReflections) {
+    /* Since all possible unique reflections are explored
+     * there may be 0 observations for some of them.
+     * In that case, nothing can be done.*/
+    if (unique.second.count() > 0) {
+      ++m_uniqueReflections;
+
+      // Possibly remove outliers.
+      auto outliersRemoved = unique.second.removeOutliers();
+
+      // I/sigma is calculated for all reflections, even if there is only one
+      // observation.
+      auto intensities = outliersRemoved.getIntensities();
+      auto sigmas = outliersRemoved.getSigmas();
+
+      // Accumulate the I/sigma's for current reflection into sum
+      iOverSigmaSum += getIOverSigmaSum(sigmas, intensities);
+
+      if (outliersRemoved.count() > 1) {
+        // Get mean, standard deviation for intensities
+        auto intensityStatistics = Kernel::getStatistics(
+            intensities, StatOptions::Mean | StatOptions::UncorrectedStdDev);
+
+        double meanIntensity = intensityStatistics.mean;
+
+        /* This was in the original algorithm, not entirely sure where it is
+         * used. It's basically the sum of all relative standard deviations.
+         * In a perfect data set with all equivalent reflections exactly
+         * equivalent that would be 0. */
+        m_chiSquared += intensityStatistics.standard_deviation / meanIntensity;
+
+        // For both RMerge and RPim sum(|I - <I>|) is required
+        double sumOfDeviationsFromMean =
+            std::accumulate(intensities.begin(), intensities.end(), 0.0,
+                            [meanIntensity](double sum, double intensity) {
+                              return sum + fabs(intensity - meanIntensity);
+                            });
+
+        // Accumulate into total sum for numerator of RMerge
+        rMergeNumerator += sumOfDeviationsFromMean;
+
+        // For Rpim, the sum is weighted by a factor depending on N
+        double rPimFactor =
+            sqrt(1.0 / (static_cast<double>(outliersRemoved.count()) - 1.0));
+        rPimNumerator += (rPimFactor * sumOfDeviationsFromMean);
+
+        // Collect sum of intensities for R-value calculation
+        intensitySumRValues +=
+            std::accumulate(intensities.begin(), intensities.end(), 0.0);
+
+        // The original algorithm sets the intensities and sigmas to the mean.
+        double sqrtOfMeanSqrSigma = getRMS(sigmas);
+        outliersRemoved.setPeaksIntensityAndSigma(meanIntensity,
+                                                  sqrtOfMeanSqrSigma);
+      }
+
+      const std::vector<Peak> &reflectionPeaks = outliersRemoved.getPeaks();
+      m_peaks.insert(m_peaks.end(), reflectionPeaks.begin(),
+                     reflectionPeaks.end());
+    }
+  }
+
+  m_measuredReflections = static_cast<int>(m_peaks.size());
+
+  if (m_uniqueReflections > 0) {
+    m_redundancy = static_cast<double>(m_measuredReflections) /
+                   static_cast<double>(m_uniqueReflections);
+  }
+
+  m_completeness = static_cast<double>(m_uniqueReflections) /
+                   static_cast<double>(uniqueReflections.size());
+
+  if (intensitySumRValues > 0.0) {
+    m_rMerge = rMergeNumerator / intensitySumRValues;
+    m_rPim = rPimNumerator / intensitySumRValues;
+  }
+
+  if (m_measuredReflections > 0) {
+    m_meanIOverSigma =
+        iOverSigmaSum / static_cast<double>(m_measuredReflections);
+
+    auto dspacingLimits = getDSpacingLimits(m_peaks);
+    m_dspacingMin = dspacingLimits.first;
+    m_dspacingMax = dspacingLimits.second;
+  }
+}
+
+/// Returns the sum of all I/sigma-ratios defined by the two vectors using
+/// std::inner_product.
+double PeaksStatistics::getIOverSigmaSum(
+    const std::vector<double> &sigmas,
+    const std::vector<double> &intensities) const {
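+  // inner_product with plus/divides evaluates sum_i (I_i / sigma_i).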
+  return std::inner_product(intensities.begin(), intensities.end(),
+                            sigmas.begin(), 0.0, std::plus<double>(),
+                            std::divides<double>());
+}
+
+/// Returns the Root mean square of the supplied vector.
+double PeaksStatistics::getRMS(const std::vector<double> &data) const {
+  double sumOfSquares =
+      std::inner_product(data.begin(), data.end(), data.begin(), 0.0);
+
+  return sqrt(sumOfSquares / static_cast<double>(data.size()));
+}
+
+/// Returns the lowest and highest d-spacing in the peak list.
+std::pair<double, double>
+PeaksStatistics::getDSpacingLimits(const std::vector<Peak> &peaks) const {
+  if (peaks.empty()) {
+    return std::make_pair(0.0, 0.0);
+  }
+
+  auto dspacingLimitIterators = std::minmax_element(
+      peaks.begin(), peaks.end(), [](const Peak &lhs, const Peak &rhs) {
+        return lhs.getDSpacing() < rhs.getDSpacing();
+      });
+
+  return std::make_pair((*(dspacingLimitIterators.first)).getDSpacing(),
+                        (*(dspacingLimitIterators.second)).getDSpacing());
+}
+
+} // namespace PeakStatisticsTools
+} // namespace Crystal
+} // namespace Mantid
diff --git a/Framework/Crystal/src/PredictPeaks.cpp b/Framework/Crystal/src/PredictPeaks.cpp
index 42e04a6635526de79e45acfdf64a98b416aff5b6..c88e1bfd59c5bdf559398601376c11ca400893d3 100644
--- a/Framework/Crystal/src/PredictPeaks.cpp
+++ b/Framework/Crystal/src/PredictPeaks.cpp
@@ -1,4 +1,5 @@
 #include "MantidCrystal/PredictPeaks.h"
+#include "MantidAPI/DetectorInfo.h"
 #include "MantidAPI/IMDEventWorkspace.h"
 #include "MantidAPI/MatrixWorkspace.h"
 #include "MantidAPI/Run.h"
@@ -8,12 +9,16 @@
 #include "MantidGeometry/Crystal/HKLGenerator.h"
 #include "MantidGeometry/Crystal/StructureFactorCalculatorSummation.h"
 #include "MantidGeometry/Objects/InstrumentRayTracer.h"
+#include "MantidGeometry/Objects/BoundingBox.h"
+#include "MantidGeometry/Instrument/ReferenceFrame.h"
+#include "MantidKernel/BoundedValidator.h"
 #include "MantidKernel/ListValidator.h"
 #include "MantidKernel/EnabledWhenProperty.h"
+#include "MantidKernel/make_unique.h"
 #include "MantidGeometry/Instrument/RectangularDetector.h"
-#include "MantidKernel/BoundedValidator.h"
 #include "MantidGeometry/Crystal/EdgePixel.h"
 
+#include <fstream>
 using Mantid::Kernel::EnabledWhenProperty;
 
 namespace Mantid {
@@ -219,7 +224,7 @@ void PredictPeaks::exec() {
 
   setInstrumentFromInputWorkspace(inputExperimentInfo);
   setRunNumberFromInputWorkspace(inputExperimentInfo);
-
+  setReferenceFrameAndBeamDirection();
   checkBeamDirection();
 
   // Create the output
@@ -257,6 +262,9 @@ void PredictPeaks::exec() {
   Progress prog(this, 0.0, 1.0, possibleHKLs.size() * gonioVec.size());
   prog.setNotifyStep(0.01);
 
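+  // Build the detector searcher once up front so that each predicted Q vector
+  // only needs a fast detector lookup.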
+  m_detectorCacheSearch =
+      Kernel::make_unique<DetectorSearcher>(m_inst, m_pw->detectorInfo());
+
   for (auto &goniometerMatrix : gonioVec) {
     // Final transformation matrix (HKL to Q in lab frame)
     DblMatrix orientedUB = goniometerMatrix * ub;
@@ -470,40 +478,96 @@ void PredictPeaks::calculateQAndAddToOutput(const V3D &hkl,
   // The q-vector direction of the peak is = goniometer * ub * hkl_vector
   // This is in inelastic convention: momentum transfer of the LATTICE!
   // Also, q does have a 2pi factor = it is equal to 2pi/wavelength.
-  V3D q = orientedUB * hkl * (2.0 * M_PI * m_qConventionFactor);
+  const auto q = orientedUB * hkl * (2.0 * M_PI * m_qConventionFactor);
+  const auto params = getPeakParametersFromQ(q);
+  const auto detectorDir = std::get<0>(params);
+  const auto wl = std::get<1>(params);
+
+  const bool useExtendedDetectorSpace =
+      getProperty("PredictPeaksOutsideDetectors");
+  const auto result = m_detectorCacheSearch->findDetectorIndex(q);
+  const auto hitDetector = std::get<0>(result);
+  const auto index = std::get<1>(result);
 
-  // Create the peak using the Q in the lab framewith all its info:
-  Peak p(m_inst, q);
-  if (m_edge > 0) {
-    if (edgePixel(m_inst, p.getBankName(), p.getCol(), p.getRow(), m_edge))
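+  // Nothing to add for this HKL if the peak missed all detectors and the
+  // extended detector space has not been requested.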
+  if (!hitDetector && !useExtendedDetectorSpace) {
+    return;
+  }
+
+  const auto &detInfo = m_pw->detectorInfo();
+  const auto &det = detInfo.detector(index);
+  std::unique_ptr<Peak> peak;
+
+  if (hitDetector) {
+    // The peak hit a detector, so add it to the list
+    peak = Kernel::make_unique<Peak>(m_inst, det.getID(), wl);
+    if (!peak->getDetector())
       return;
+
+  } else if (useExtendedDetectorSpace) {
+    // use extended detector space to try and guess peak position
+    const auto returnedComponent =
+        m_inst->getComponentByName("extended-detector-space");
+    // Check that the component is valid
+    const auto component =
+        boost::dynamic_pointer_cast<const ObjComponent>(returnedComponent);
+    if (!component)
+      throw std::runtime_error("PredictPeaks: user requested use of an "
+                               "extended detector space to predict peaks, but "
+                               "there is no definition in the IDF");
+
+    // find where this Q vector should intersect with "extended" space
+    Geometry::Track track(detInfo.samplePosition(), detectorDir);
+    if (!component->interceptSurface(track))
+      return;
+
+    // Use the distance to the track's exit point as the approximate detector distance
+    const auto magnitude = track.back().exitPoint.norm();
+    peak = Kernel::make_unique<Peak>(m_inst, q,
+                                     boost::optional<double>(magnitude));
   }
-  /* The constructor calls setQLabFrame, which already calls findDetector, which
-     is expensive. It's not necessary to call it again, instead it's enough to
-     check whether a detector has already been set.
-
-     Peaks are added if they fall on a detector OR is the extended detector
-     space component is defined which can be used to approximate a peak's
-     position in detector space.
-     */
-  bool useExtendedDetectorSpace = getProperty("PredictPeaksOutsideDetectors");
-  if (!p.getDetector() &&
-      !(useExtendedDetectorSpace &&
-        m_inst->getComponentByName("extended-detector-space")))
+
+  if (m_edge > 0 && edgePixel(m_inst, peak->getBankName(), peak->getCol(),
+                              peak->getRow(), m_edge))
     return;
 
   // Only add peaks that hit the detector
-  p.setGoniometerMatrix(goniometerMatrix);
+  peak->setGoniometerMatrix(goniometerMatrix);
   // Save the run number found before.
-  p.setRunNumber(m_runNumber);
-  p.setHKL(hkl * m_qConventionFactor);
+  peak->setRunNumber(m_runNumber);
+  peak->setHKL(hkl * m_qConventionFactor);
 
   if (m_sfCalculator) {
-    p.setIntensity(m_sfCalculator->getFSquared(hkl));
+    peak->setIntensity(m_sfCalculator->getFSquared(hkl));
   }
 
   // Add it to the workspace
-  m_pw->addPeak(p);
+  m_pw->addPeak(*peak);
+}
+
+/** Get the detector direction and wavelength of a peak from its Q lab vector
+ *
+ * @param q :: the q lab vector for this peak
+ * @return a tuple containing the detector direction and the wavelength
+ */
+std::tuple<V3D, double>
+PredictPeaks::getPeakParametersFromQ(const V3D &q) const {
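+  // For elastic scattering |k_i| = |k_f| = 2*pi/lambda and Q = k_i - k_f (in
+  // the ki-kf convention, including the 2*pi factor), so
+  //   |Q|^2 = 2 |k_i| (Q . beamDir)  =>  |k_i| = |Q|^2 / (2 Q . beamDir).
+  // The scattered direction follows from k_f = k_i - Q.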
+  double norm_q = q.norm();
+  // Default for ki-kf has -q
+  const double qBeam = q.scalar_prod(m_refBeamDir) * m_qConventionFactor;
+  double one_over_wl = (norm_q * norm_q) / (2.0 * qBeam);
+  double wl = (2.0 * M_PI) / one_over_wl;
+  // Default for ki-kf has -q
+  V3D detectorDir = q * -m_qConventionFactor;
+  detectorDir[m_refFrame->pointingAlongBeam()] = one_over_wl - qBeam;
+  detectorDir.normalize();
+  return std::make_tuple(detectorDir, wl);
+}
+
+/** Cache the reference frame and beam direction using the instrument
+ */
+void PredictPeaks::setReferenceFrameAndBeamDirection() {
+  m_refFrame = m_inst->getReferenceFrame();
+  m_refBeamDir = m_refFrame->vecPointingAlongBeam();
 }
 
 } // namespace Mantid
diff --git a/Framework/Crystal/src/SortHKL.cpp b/Framework/Crystal/src/SortHKL.cpp
index 450084846821adc915a934e963639b84098ba9f5..23ae54e962ce3e2076cf15a81a6836a55c737469 100644
--- a/Framework/Crystal/src/SortHKL.cpp
+++ b/Framework/Crystal/src/SortHKL.cpp
@@ -9,12 +9,9 @@
 
 #include "MantidGeometry/Instrument/RectangularDetector.h"
 #include "MantidGeometry/Crystal/PointGroupFactory.h"
-#include "MantidGeometry/Crystal/HKLGenerator.h"
-#include "MantidGeometry/Crystal/BasicHKLFilters.h"
 #include "MantidGeometry/Crystal/OrientedLattice.h"
 
 #include "MantidKernel/ListValidator.h"
-#include "MantidKernel/Statistics.h"
 #include "MantidKernel/Utils.h"
 
 #include <cmath>
@@ -25,6 +22,7 @@ using namespace Mantid::Geometry;
 using namespace Mantid::DataObjects;
 using namespace Mantid::Kernel;
 using namespace Mantid::API;
+using namespace Mantid::Crystal::PeakStatisticsTools;
 
 namespace Mantid {
 namespace Crystal {
@@ -91,10 +89,10 @@ void SortHKL::exec() {
 
   UnitCell cell = inputPeaksWorkspace->sample().getOrientedLattice();
 
-  std::map<V3D, UniqueReflection> uniqueReflections =
+  UniqueReflectionCollection uniqueReflections =
       getUniqueReflections(peaks, cell);
 
-  PeaksStatistics peaksStatistics(uniqueReflections, peaks.size());
+  PeaksStatistics peaksStatistics(uniqueReflections);
 
   // Store the statistics for output.
   const std::string tableName = getProperty("StatisticsTable");
@@ -145,7 +143,7 @@ SortHKL::getNonZeroPeaks(const std::vector<Peak> &inputPeaks) const {
  * @param cell :: UnitCell to use for calculation of possible reflections.
  * @return Map of unique reflections.
  */
-std::map<V3D, UniqueReflection>
+UniqueReflectionCollection
 SortHKL::getUniqueReflections(const std::vector<Peak> &peaks,
                               const UnitCell &cell) const {
   ReflectionCondition_sptr centering = getCentering();
@@ -153,18 +151,10 @@ SortHKL::getUniqueReflections(const std::vector<Peak> &peaks,
 
   std::pair<double, double> dLimits = getDLimits(peaks, cell);
 
-  std::map<V3D, UniqueReflection> uniqueReflectionInRange =
-      getPossibleUniqueReflections(cell, dLimits, pointGroup, centering);
+  UniqueReflectionCollection reflections(cell, dLimits, pointGroup, centering);
+  reflections.addObservations(peaks);
 
-  for (auto const &peak : peaks) {
-    V3D hkl = peak.getHKL();
-    hkl.round();
-
-    uniqueReflectionInRange.at(pointGroup->getReflectionFamily(hkl))
-        .addPeak(peak);
-  }
-
-  return uniqueReflectionInRange;
+  return reflections;
 }
 
 /// Returns the centering extracted from the user-supplied property.
@@ -208,47 +198,6 @@ std::pair<double, double> SortHKL::getDLimits(const std::vector<Peak> &peaks,
                         cell.d((*dLimitIterators.second).getHKL()));
 }
 
-/**
- * @brief SortHKL::getPossibleUniqueReflections
- *
- * This method returns a map that contains UniqueReflection-objects, one
- * for each unique reflection in the given resolution range. It uses the
- * given cell, point group and centering to determine which reflections
- * are allowed and which ones are equivalent.
- *
- * @param cell :: UnitCell of the sample.
- * @param dLimits :: Resolution limits for the generated reflections.
- * @param pointGroup :: Point group of the sample.
- * @param centering :: Lattice centering (important for completeness
- * calculation).
- *
- * @return Map of UniqueReflection objects with HKL of the reflection family as
- * key
- */
-std::map<V3D, UniqueReflection> SortHKL::getPossibleUniqueReflections(
-    const UnitCell &cell, const std::pair<double, double> &dLimits,
-    const PointGroup_sptr &pointGroup,
-    const ReflectionCondition_sptr &centering) const {
-
-  HKLGenerator generator(cell, dLimits.first);
-  HKLFilter_const_sptr dFilter = boost::make_shared<const HKLFilterDRange>(
-      cell, dLimits.first, dLimits.second);
-  HKLFilter_const_sptr centeringFilter =
-      boost::make_shared<const HKLFilterCentering>(centering);
-  HKLFilter_const_sptr filter = dFilter & centeringFilter;
-
-  // Generate map of UniqueReflection-objects with reflection family as key.
-  std::map<V3D, UniqueReflection> uniqueHKLs;
-  for (const auto &hkl : generator) {
-    if (filter->isAllowed(hkl)) {
-      V3D hklFamily = pointGroup->getReflectionFamily(hkl);
-      uniqueHKLs.emplace(hklFamily, UniqueReflection(hklFamily));
-    }
-  }
-
-  return uniqueHKLs;
-}
-
 /// Create a TableWorkspace for the statistics with appropriate columns or get
 /// one from the ADS.
 ITableWorkspace_sptr
@@ -310,40 +259,6 @@ PeaksWorkspace_sptr SortHKL::getOutputPeaksWorkspace(
   return outputPeaksWorkspace;
 }
 
-/// Returns the sum of all I/sigma-ratios defined by the two vectors using
-/// std::inner_product.
-double PeaksStatistics::getIOverSigmaSum(
-    const std::vector<double> &sigmas,
-    const std::vector<double> &intensities) const {
-  return std::inner_product(intensities.begin(), intensities.end(),
-                            sigmas.begin(), 0.0, std::plus<double>(),
-                            std::divides<double>());
-}
-
-/// Returns the Root mean square of the supplied vector.
-double PeaksStatistics::getRMS(const std::vector<double> &data) const {
-  double sumOfSquares =
-      std::inner_product(data.begin(), data.end(), data.begin(), 0.0);
-
-  return sqrt(sumOfSquares / static_cast<double>(data.size()));
-}
-
-/// Returns the lowest and hights wavelength in the peak list.
-std::pair<double, double>
-PeaksStatistics::getDSpacingLimits(const std::vector<Peak> &peaks) const {
-  if (peaks.empty()) {
-    return std::make_pair(0.0, 0.0);
-  }
-
-  auto dspacingLimitIterators = std::minmax_element(
-      peaks.begin(), peaks.end(), [](const Peak &lhs, const Peak &rhs) {
-        return lhs.getDSpacing() < rhs.getDSpacing();
-      });
-
-  return std::make_pair((*(dspacingLimitIterators.first)).getDSpacing(),
-                        (*(dspacingLimitIterators.second)).getDSpacing());
-}
-
 /// Sorts the peaks in the workspace by H, K and L.
 void SortHKL::sortOutputPeaksByHKL(IPeaksWorkspace_sptr outputPeaksWorkspace) {
   // Sort by HKL
@@ -352,175 +267,5 @@ void SortHKL::sortOutputPeaksByHKL(IPeaksWorkspace_sptr outputPeaksWorkspace) {
   outputPeaksWorkspace->sort(criteria);
 }
 
-/**
- * @brief PeaksStatistics::calculatePeaksStatistics
- *
- * This function iterates through the unique reflections map and computes
- * statistics for the reflections/peaks. It calls
- * UniqueReflection::removeOutliers, so outliers are removed before the
- * statistical quantities are calculated.
- *
- * Furthermore it sets the intensities of each peak to the mean of the
- * group of equivalent reflections.
- *
- * @param uniqueReflections :: Map of unique reflections and peaks.
- */
-void PeaksStatistics::calculatePeaksStatistics(
-    std::map<V3D, UniqueReflection> &uniqueReflections) {
-  double rMergeNumerator = 0.0;
-  double rPimNumerator = 0.0;
-  double intensitySumRValues = 0.0;
-  double iOverSigmaSum = 0.0;
-
-  for (auto &unique : uniqueReflections) {
-    /* Since all possible unique reflections are explored
-     * there may be 0 observations for some of them.
-     * In that case, nothing can be done.*/
-    if (unique.second.count() > 0) {
-      ++m_uniqueReflections;
-
-      // Possibly remove outliers.
-      unique.second.removeOutliers();
-
-      // I/sigma is calculated for all reflections, even if there is only one
-      // observation.
-      const std::vector<double> &intensities = unique.second.getIntensities();
-      const std::vector<double> &sigmas = unique.second.getSigmas();
-
-      // Accumulate the I/sigma's for current reflection into sum
-      iOverSigmaSum += getIOverSigmaSum(sigmas, intensities);
-
-      if (unique.second.count() > 1) {
-        // Get mean, standard deviation for intensities
-        Statistics intensityStatistics = Kernel::getStatistics(
-            intensities, StatOptions::Mean | StatOptions::UncorrectedStdDev);
-
-        double meanIntensity = intensityStatistics.mean;
-
-        /* This was in the original algorithm, not entirely sure where it is
-         * used. It's basically the sum of all relative standard deviations.
-         * In a perfect data set with all equivalent reflections exactly
-         * equivalent that would be 0. */
-        m_chiSquared += intensityStatistics.standard_deviation / meanIntensity;
-
-        // For both RMerge and RPim sum(|I - <I>|) is required
-        double sumOfDeviationsFromMean =
-            std::accumulate(intensities.begin(), intensities.end(), 0.0,
-                            [meanIntensity](double sum, double intensity) {
-                              return sum + fabs(intensity - meanIntensity);
-                            });
-
-        // Accumulate into total sum for numerator of RMerge
-        rMergeNumerator += sumOfDeviationsFromMean;
-
-        // For Rpim, the sum is weighted by a factor depending on N
-        double rPimFactor =
-            sqrt(1.0 / (static_cast<double>(unique.second.count()) - 1.0));
-        rPimNumerator += (rPimFactor * sumOfDeviationsFromMean);
-
-        // Collect sum of intensities for R-value calculation
-        intensitySumRValues +=
-            std::accumulate(intensities.begin(), intensities.end(), 0.0);
-
-        // The original algorithm sets the intensities and sigmas to the mean.
-        double sqrtOfMeanSqrSigma = getRMS(sigmas);
-        unique.second.setPeaksIntensityAndSigma(meanIntensity,
-                                                sqrtOfMeanSqrSigma);
-      }
-
-      const std::vector<Peak> &reflectionPeaks = unique.second.getPeaks();
-      m_peaks.insert(m_peaks.end(), reflectionPeaks.begin(),
-                     reflectionPeaks.end());
-    }
-  }
-
-  m_measuredReflections = static_cast<int>(m_peaks.size());
-
-  if (m_uniqueReflections > 0) {
-    m_redundancy = static_cast<double>(m_measuredReflections) /
-                   static_cast<double>(m_uniqueReflections);
-  }
-
-  m_completeness = static_cast<double>(m_uniqueReflections) /
-                   static_cast<double>(uniqueReflections.size());
-
-  if (intensitySumRValues > 0.0) {
-    m_rMerge = rMergeNumerator / intensitySumRValues;
-    m_rPim = rPimNumerator / intensitySumRValues;
-  }
-
-  if (m_measuredReflections > 0) {
-    m_meanIOverSigma =
-        iOverSigmaSum / static_cast<double>(m_measuredReflections);
-
-    std::pair<double, double> dspacingLimits = getDSpacingLimits(m_peaks);
-    m_dspacingMin = dspacingLimits.first;
-    m_dspacingMax = dspacingLimits.second;
-  }
-}
-
-/// Returns a vector with the intensities of the Peaks stored in this
-/// reflection.
-std::vector<double> UniqueReflection::getIntensities() const {
-  std::vector<double> intensities;
-  intensities.reserve(m_peaks.size());
-
-  std::transform(
-      m_peaks.begin(), m_peaks.end(), std::back_inserter(intensities),
-      [](const DataObjects::Peak &peak) { return peak.getIntensity(); });
-
-  return intensities;
-}
-
-/// Returns a vector with the intensity sigmas of the Peaks stored in this
-/// reflection.
-std::vector<double> UniqueReflection::getSigmas() const {
-  std::vector<double> sigmas;
-  sigmas.reserve(m_peaks.size());
-
-  std::transform(
-      m_peaks.begin(), m_peaks.end(), std::back_inserter(sigmas),
-      [](const DataObjects::Peak &peak) { return peak.getSigmaIntensity(); });
-
-  return sigmas;
-}
-
-/// Removes peaks whose intensity deviates more than sigmaCritical from the
-/// intensities' mean.
-void UniqueReflection::removeOutliers(double sigmaCritical) {
-  if (sigmaCritical <= 0.0) {
-    throw std::invalid_argument(
-        "Critical sigma value has to be greater than 0.");
-  }
-
-  if (m_peaks.size() > 2) {
-    const std::vector<double> &intensities = getIntensities();
-    const std::vector<double> &zScores = Kernel::getZscore(intensities);
-
-    std::vector<size_t> outlierIndices;
-    for (size_t i = 0; i < zScores.size(); ++i) {
-      if (zScores[i] > sigmaCritical) {
-        outlierIndices.push_back(i);
-      }
-    }
-
-    if (!outlierIndices.empty()) {
-      for (auto it = outlierIndices.rbegin(); it != outlierIndices.rend();
-           ++it) {
-        m_peaks.erase(m_peaks.begin() + (*it));
-      }
-    }
-  }
-}
-
-/// Sets the intensities and sigmas of all stored peaks to the supplied values.
-void UniqueReflection::setPeaksIntensityAndSigma(double intensity,
-                                                 double sigma) {
-  for (auto &peak : m_peaks) {
-    peak.setIntensity(intensity);
-    peak.setSigmaIntensity(sigma);
-  }
-}
-
 } // namespace Mantid
 } // namespace Crystal
diff --git a/Framework/Crystal/test/PeakStatisticsToolsTest.h b/Framework/Crystal/test/PeakStatisticsToolsTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..779df462ee03d7bdd77fc92a82f02198f912161c
--- /dev/null
+++ b/Framework/Crystal/test/PeakStatisticsToolsTest.h
@@ -0,0 +1,360 @@
+#ifndef MANTID_CRYSTAL_PEAKSTATISTICSTOOLSTEST_H_
+#define MANTID_CRYSTAL_PEAKSTATISTICSTOOLSTEST_H_
+
+#include <cxxtest/TestSuite.h>
+
+#include "MantidCrystal/PeakStatisticsTools.h"
+#include "MantidGeometry/Crystal/PointGroupFactory.h"
+#include "MantidDataObjects/Peak.h"
+
+using namespace Mantid::Crystal;
+using namespace Mantid::Crystal::PeakStatisticsTools;
+using namespace Mantid::DataObjects;
+using namespace Mantid::Geometry;
+using namespace Mantid::Kernel;
+
+namespace {
+std::vector<Peak> getPeaksWithIandSigma(const std::vector<double> &intensity,
+                                        const std::vector<double> &sigma,
+                                        const V3D &hkl = V3D(0, 0, 1)) {
+  std::vector<Peak> peaks;
+  std::transform(intensity.begin(), intensity.end(), sigma.begin(),
+                 std::back_inserter(peaks),
+                 [hkl](double intensity, double sigma) {
+                   Peak peak;
+                   peak.setIntensity(intensity);
+                   peak.setSigmaIntensity(sigma);
+                   peak.setHKL(hkl);
+                   return peak;
+                 });
+
+  return peaks;
+}
+
+UniqueReflection getReflectionWithPeaks(const std::vector<double> &intensities,
+                                        const std::vector<double> &sigmas,
+                                        double wavelength = 0.0) {
+  std::vector<Peak> peaks = getPeaksWithIandSigma(intensities, sigmas);
+
+  if (wavelength > 0) {
+    for (auto &peak : peaks) {
+      peak.setWavelength(wavelength);
+    }
+  }
+
+  UniqueReflection reflection(V3D(2, 3, 4));
+  for (auto peak : peaks) {
+    reflection.addPeak(peak);
+  }
+
+  return reflection;
+}
+
+UniqueReflectionCollection
+getUniqueReflectionCollection(double a, const std::string &centering,
+                              const std::string &pointGroup, double dMin) {
+  UnitCell cell(a, a, a);
+  PointGroup_sptr pg =
+      PointGroupFactory::Instance().createPointGroup(pointGroup);
+  ReflectionCondition_sptr cent = getReflectionConditionBySymbol(centering);
+
+  return UniqueReflectionCollection(cell, std::make_pair(dMin, 100.0), pg,
+                                    cent);
+}
+
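+// Exposes a constructor of UniqueReflectionCollection that takes a prepared
+// reflection map, so tests can build a collection without HKL generation.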
+class MockUniqueReflectionCollection : public UniqueReflectionCollection {
+public:
+  explicit MockUniqueReflectionCollection(
+      const std::map<V3D, UniqueReflection> &reflections,
+      const PointGroup_sptr &pointGroup =
+          PointGroupFactory::Instance().createPointGroup("1"))
+      : UniqueReflectionCollection(reflections, pointGroup) {}
+};
+}
+
+class PeakStatisticsToolsTest : public CxxTest::TestSuite {
+public:
+  // This pair of boilerplate methods prevent the suite being created statically
+  // This means the constructor isn't called when running other tests
+  static PeakStatisticsToolsTest *createSuite() {
+    return new PeakStatisticsToolsTest();
+  }
+  static void destroySuite(PeakStatisticsToolsTest *suite) { delete suite; }
+
+  void test_UniqueReflectionsConstructor() {
+    V3D hkl(1, 1, 1);
+    UniqueReflection reflection(hkl);
+
+    TSM_ASSERT_EQUALS("Constructed UniqueReflection does not have 0 peaks.",
+                      reflection.count(), 0);
+    TSM_ASSERT_EQUALS(
+        "HKL is not equal to constructor argument in UniqueReflection",
+        reflection.getHKL(), hkl);
+  }
+
+  void test_UniqueReflectionsPeaks() {
+    UniqueReflection reflection(V3D(2, 3, 4));
+
+    Peak peak;
+    TS_ASSERT_THROWS_NOTHING(reflection.addPeak(peak));
+    TSM_ASSERT_EQUALS("UniqueReflection count is not 1 after adding peak.",
+                      reflection.count(), 1);
+    TSM_ASSERT_EQUALS(
+        "UniqueReflection peaks vector size is not 1 after adding peak.",
+        reflection.getPeaks().size(), 1);
+  }
+
+  void test_UniqueReflectionsGetIntensitiesAndSigmas() {
+    UniqueReflection reflection(V3D(2, 3, 4));
+
+    std::vector<Peak> peaks = getPeaksWithIandSigma({30.0, 34.0}, {4.5, 6.5});
+    for (auto peak : peaks) {
+      reflection.addPeak(peak);
+    }
+
+    std::vector<double> intensities = reflection.getIntensities();
+    TSM_ASSERT_EQUALS("Intensity vector from UniqueReflection has wrong size.",
+                      intensities.size(), 2);
+    TS_ASSERT_EQUALS(intensities[0], 30.0);
+    TS_ASSERT_EQUALS(intensities[1], 34.0);
+
+    std::vector<double> sigmas = reflection.getSigmas();
+    TSM_ASSERT_EQUALS("Sigma vector from UniqueReflection has wrong size.",
+                      sigmas.size(), 2);
+    TS_ASSERT_EQUALS(sigmas[0], 4.5);
+    TS_ASSERT_EQUALS(sigmas[1], 6.5);
+  }
+
+  void test_UniqueReflectionRemoveOutliersSigmaCrit() {
+    UniqueReflection reflection(V3D(2, 3, 4));
+    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers(3.0));
+    TS_ASSERT_THROWS(reflection.removeOutliers(0.0), std::invalid_argument);
+    TS_ASSERT_THROWS(reflection.removeOutliers(-10.0), std::invalid_argument);
+  }
+
+  void test_UniqueReflectionRemoveOutliersFewPeaks() {
+    std::vector<Peak> peaks = getPeaksWithIandSigma({30.0, 34.0}, {4.5, 6.5});
+
+    UniqueReflection reflection(V3D(2, 3, 4));
+    reflection.addPeak(peaks[0]);
+
+    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers());
+
+    auto outliersRemoved = reflection.removeOutliers();
+    TSM_ASSERT_EQUALS("Peak was removed as outlier although there's only 1.",
+                      outliersRemoved.count(), 1);
+
+    reflection.addPeak(peaks[1]);
+
+    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers());
+
+    outliersRemoved = reflection.removeOutliers();
+    TSM_ASSERT_EQUALS("Peak was removed as outlier although there's only 2.",
+                      outliersRemoved.count(), 2);
+  }
+
+  void test_UniqueReflectionRemoveOutliers() {
+    UniqueReflection reflection =
+        getReflectionWithPeaks({30.0, 34.0, 32.0, 31.0}, {4.5, 6.5, 10.0, 2.3});
+
+    // standard deviation is 1.70782512765993
+    auto cleanReflection = reflection.removeOutliers();
+    TSM_ASSERT_EQUALS(
+        "UniqueReflection removed outlier although it should not.",
+        cleanReflection.count(), 4);
+
+    cleanReflection = reflection.removeOutliers(2.0);
+    TSM_ASSERT_EQUALS(
+        "UniqueReflection removed outlier although it should not.",
+        cleanReflection.count(), 4);
+
+    cleanReflection = reflection.removeOutliers(1.0);
+    TSM_ASSERT_EQUALS(
+        "UniqueReflection did not remove outliers although it should have.",
+        cleanReflection.count(), 2);
+
+    std::vector<double> cleanIntensities = cleanReflection.getIntensities();
+    TS_ASSERT_EQUALS(cleanIntensities[0], 32.0);
+    TS_ASSERT_EQUALS(cleanIntensities[1], 31.0);
+  }
+
+  void test_UniqueReflectionSetIntensityAndSigma() {
+    UniqueReflection reflection =
+        getReflectionWithPeaks({30.0, 34.0, 32.0, 31.0}, {4.5, 6.5, 10.0, 2.3});
+
+    reflection.setPeaksIntensityAndSigma(10.0, 0.1);
+
+    for (auto peak : reflection.getPeaks()) {
+      TSM_ASSERT_EQUALS(
+          "Incorrect peak intensity after set in UniqueReflection.",
+          peak.getIntensity(), 10.0);
+      TSM_ASSERT_EQUALS("Incorrect peak sigma after set in UniqueReflection.",
+                        peak.getSigmaIntensity(), 0.1);
+    }
+  }
+
+  void test_UniqueReflectionCollectionEmpty() {
+    UniqueReflectionCollection reflections =
+        getUniqueReflectionCollection(3.0, "P", "m-3m", 1.5);
+
+    // There should be 4 reflections: 001, 011, 111, 002
+    TS_ASSERT_EQUALS(reflections.getUniqueReflectionCount(), 4);
+
+    // Uses point group to retrieve UniqueReflections
+    TS_ASSERT_THROWS_NOTHING(reflections.getReflection(V3D(0, 0, 1)));
+    TS_ASSERT_THROWS_NOTHING(reflections.getReflection(V3D(0, 0, -1)));
+
+    TS_ASSERT_THROWS_NOTHING(reflections.getReflection(V3D(0, 1, 1)));
+    TS_ASSERT_THROWS_NOTHING(reflections.getReflection(V3D(1, 1, 1)));
+    TS_ASSERT_THROWS_NOTHING(reflections.getReflection(V3D(0, 0, 2)));
+
+    // Reflections that do not exist throw some exception
+    TS_ASSERT_THROWS_ANYTHING(reflections.getReflection(V3D(0, 0, 3)));
+    TS_ASSERT_THROWS_ANYTHING(reflections.getReflection(V3D(2, -1, 0)));
+
+    // No observations
+    TS_ASSERT_EQUALS(reflections.getObservedReflectionCount(), 0);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(), 0);
+  }
+
+  void test_UniqueReflectionCollectionAddObservations() {
+    UniqueReflectionCollection reflections =
+        getUniqueReflectionCollection(3.0, "P", "m-3m", 1.5);
+
+    TS_ASSERT_EQUALS(reflections.getObservedReflectionCount(), 0);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(), 0);
+    TS_ASSERT_EQUALS(reflections.getUnobservedUniqueReflections().size(), 4);
+
+    reflections.addObservations(
+        getPeaksWithIandSigma({1.0, 1.0}, {2.0, 2.0}, V3D(1, 0, 0)));
+
+    TS_ASSERT_EQUALS(reflections.getObservedReflectionCount(), 2);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(), 1);
+    TS_ASSERT_EQUALS(reflections.getUnobservedUniqueReflections().size(), 3);
+
+    // out-of-range peaks are ignored, so the reflection counts do not change
+    reflections.addObservations(
+        getPeaksWithIandSigma({1.0, 1.0}, {2.0, 2.0}, V3D(0, 5, 0)));
+
+    TS_ASSERT_EQUALS(reflections.getObservedReflectionCount(), 2);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(), 1);
+  }
+
+  void test_UniqueReflectionCollectionReflectionCounts() {
+    UniqueReflectionCollection reflections =
+        getUniqueReflectionCollection(3.0, "P", "m-3m", 1.5);
+
+    reflections.addObservations(
+        getPeaksWithIandSigma({1.0, 1.0}, {2.0, 2.0}, V3D(1, 0, 0)));
+    reflections.addObservations(
+        getPeaksWithIandSigma({1.0, 1.0, 2.0}, {2.0, 2.0, 3.0}, V3D(1, 1, 0)));
+
+    TS_ASSERT_EQUALS(reflections.getObservedReflectionCount(), 5);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(), 2);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(2), 1);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(3), 0);
+
+    TS_ASSERT_EQUALS(reflections.getUnobservedUniqueReflections().size(), 2);
+  }
+
+  void test_PeaksStatisticsNoObservation() {
+    std::map<V3D, UniqueReflection> uniques;
+    uniques.insert(
+        std::make_pair(V3D(1, 1, 1), UniqueReflection(V3D(1, 1, 1))));
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 0);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 0);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 0.0);
+  }
+
+  void test_PeaksStatisticsOneObservation() {
+    std::map<V3D, UniqueReflection> uniques{
+        {{1, 1, 1}, getReflectionWithPeaks({56.0}, {4.5}, 1.0)}};
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 1);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 56.0 / 4.5);
+  }
+
+  void test_PeaksStatisticsOneObservationTwoUnique() {
+    std::map<V3D, UniqueReflection> uniques{
+        {{1, 1, 1}, getReflectionWithPeaks({56.0}, {4.5}, 1.0)},
+        {{1, 1, 2}, UniqueReflection(V3D(1, 1, 2))}};
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 1);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 0.5);
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 56.0 / 4.5);
+  }
+
+  void test_PeaksStatisticsTwoObservationTwoUnique() {
+
+    std::map<V3D, UniqueReflection> uniques{
+        {{1, 1, 1}, getReflectionWithPeaks({10.0}, {1.0}, 1.0)},
+        {{1, 1, 2}, getReflectionWithPeaks({20.0}, {1.0}, 2.0)}};
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 2);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 2);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 15.0);
+  }
+
+  void test_PeaksStatisticsTwoObservationOneUnique() {
+    std::map<V3D, UniqueReflection> uniques{
+        {{1, 1, 1}, getReflectionWithPeaks({10.0, 20.0}, {0.1, 0.1}, 1.0)}};
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 2);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 2.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
+    // <I> = 15, sum(I) = 30, sum(|I - <I>|) = 10, rMerge = 10 / 30 = 0.33
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 1.0 / 3.0);
+    // For 2 observations this is the same since sqrt(1 / (2 - 1)) = 1
+    TS_ASSERT_EQUALS(statistics.m_rPim, 1.0 / 3.0);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 150.0);
+  }
+
+  void test_PeaksStatisticsThreeObservationOneUnique() {
+    std::map<V3D, UniqueReflection> uniques{
+        {{1, 1, 1},
+         getReflectionWithPeaks({10.0, 20.0, 15.0}, {0.1, 0.1, 0.1}, 1.0)}};
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 3);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 3.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
+    // <I> = 15, sum(I) = 45, sum(|I - <I>|) = 10, rMerge = 10 / 45 = 0.222
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 1.0 / 4.5);
+    // For rpim the factor is  sqrt(1 / (3 - 1)) = sqrt(0.5)
+    TS_ASSERT_EQUALS(statistics.m_rPim, sqrt(0.5) / 4.5);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 150.0);
+  }
+};
+
+#endif /* MANTID_CRYSTAL_PEAKSTATISTICSTOOLSTEST_H_ */
diff --git a/Framework/Crystal/test/PredictPeaksTest.h b/Framework/Crystal/test/PredictPeaksTest.h
index 8e30cd5026e9abf68b7b15a1a7141bbf5964e403..ef297ab7108c36d69cc87f019564c751fb405c85 100644
--- a/Framework/Crystal/test/PredictPeaksTest.h
+++ b/Framework/Crystal/test/PredictPeaksTest.h
@@ -130,7 +130,38 @@ public:
   }
 
   void test_exec_withExtendedDetectorSpaceOptionCheckedNoDefinition() {
-    do_test_exec("Primitive", 10, std::vector<V3D>(), 1, true, false);
+    std::string outWSName("PredictPeaksTest_OutputWS");
+    // Make the fake input workspace
+    auto inWS = WorkspaceCreationHelper::create2DWorkspace(10000, 1);
+    auto inst =
+        ComponentCreationHelper::createTestInstrumentRectangular(1, 100);
+    inWS->setInstrument(inst);
+
+    // Set ub and Goniometer rotation
+    WorkspaceCreationHelper::setOrientedLattice(inWS, 12.0, 12.0, 12.0);
+    WorkspaceCreationHelper::setGoniometer(inWS, 0., 0., 0.);
+
+    PredictPeaks alg;
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty(
+        "InputWorkspace", boost::dynamic_pointer_cast<Workspace>(inWS)));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", outWSName));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("WavelengthMin", "0.1"));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("WavelengthMax", "10.0"));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("MinDSpacing", "1.0"));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("ReflectionCondition", "Primitive"));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setProperty("PredictPeaksOutsideDetectors", true));
+    alg.execute();
+
+    // Execution should fail because the instrument has no extended detector
+    // space definition in its IDF
+    TS_ASSERT(!alg.isExecuted());
+
+    // Remove workspace from the data service.
+    AnalysisDataService::Instance().remove(outWSName);
   }
 
   void test_exec_withInputHKLList() {
@@ -216,4 +247,54 @@ public:
   }
 };
 
+class PredictPeaksTestPerformance : public CxxTest::TestSuite {
+public:
+  void test_manyPeaksRectangular() {
+    MatrixWorkspace_sptr inWS =
+        WorkspaceCreationHelper::create2DWorkspace(10000, 1);
+    Instrument_sptr inst =
+        ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
+    inWS->setInstrument(inst);
+
+    // Set ub and Goniometer rotation
+    WorkspaceCreationHelper::setOrientedLattice(inWS, 12.0, 12.0, 12.0);
+    WorkspaceCreationHelper::setGoniometer(inWS, 0., 0., 0.);
+
+    PredictPeaks alg;
+    alg.initialize();
+    alg.setProperty("InputWorkspace",
+                    boost::dynamic_pointer_cast<Workspace>(inWS));
+    alg.setPropertyValue("OutputWorkspace", "predict_peaks_performance");
+    alg.setPropertyValue("WavelengthMin", ".5");
+    alg.setPropertyValue("WavelengthMax", "15.0");
+    alg.setPropertyValue("MinDSpacing", ".1");
+    alg.setPropertyValue("ReflectionCondition", "Primitive");
+    alg.execute();
+  }
+
+  void test_manyPeaks() {
+    MatrixWorkspace_sptr inWS =
+        WorkspaceCreationHelper::create2DWorkspace(10000, 1);
+    Instrument_sptr inst =
+        ComponentCreationHelper::createTestInstrumentCylindrical(
+            3, V3D(0, 0, -1), V3D(0, 0, 0), 1.6, 1.0);
+    inWS->setInstrument(inst);
+
+    // Set UB matrix and Goniometer rotation
+    WorkspaceCreationHelper::setOrientedLattice(inWS, 12.0, 12.0, 12.0);
+    WorkspaceCreationHelper::setGoniometer(inWS, 0., 0., 0.);
+
+    PredictPeaks alg;
+    alg.initialize();
+    alg.setProperty("InputWorkspace",
+                    boost::dynamic_pointer_cast<Workspace>(inWS));
+    alg.setPropertyValue("OutputWorkspace", "predict_peaks_performance");
+    alg.setPropertyValue("WavelengthMin", ".5");
+    alg.setPropertyValue("WavelengthMax", "15.0");
+    alg.setPropertyValue("MinDSpacing", ".1");
+    alg.setPropertyValue("ReflectionCondition", "Primitive");
+    alg.execute();
+  }
+};
+
 #endif /* MANTID_CRYSTAL_PREDICTPEAKSTEST_H_ */
diff --git a/Framework/Crystal/test/SortHKLTest.h b/Framework/Crystal/test/SortHKLTest.h
index db8210cfd56239f17121b21a750e2e0e1903e849..b16f1f18e2c9e430ea855216a368ca80362f50f4 100644
--- a/Framework/Crystal/test/SortHKLTest.h
+++ b/Framework/Crystal/test/SortHKLTest.h
@@ -27,207 +27,6 @@ using namespace Mantid::PhysicalConstants;
 
 class SortHKLTest : public CxxTest::TestSuite {
 public:
-  void test_UniqueReflectionsConstructor() {
-    V3D hkl(1, 1, 1);
-    UniqueReflection reflection(hkl);
-
-    TSM_ASSERT_EQUALS("Constructed UniqueReflection does not have 0 peaks.",
-                      reflection.count(), 0);
-    TSM_ASSERT_EQUALS(
-        "HKL is not equal to constructor argument in UniqueReflection",
-        reflection.getHKL(), hkl);
-  }
-
-  void test_UniqueReflectionsPeaks() {
-    UniqueReflection reflection(V3D(2, 3, 4));
-
-    Peak peak;
-    TS_ASSERT_THROWS_NOTHING(reflection.addPeak(peak));
-    TSM_ASSERT_EQUALS("UniqueReflection count is not 1 after adding peak.",
-                      reflection.count(), 1);
-    TSM_ASSERT_EQUALS(
-        "UniqueReflection peaks vector size is not 1 after adding peak.",
-        reflection.getPeaks().size(), 1);
-  }
-
-  void test_UniqueReflectionsGetIntensitiesAndSigmas() {
-    UniqueReflection reflection(V3D(2, 3, 4));
-
-    std::vector<Peak> peaks = getPeaksWithIandSigma({30.0, 34.0}, {4.5, 6.5});
-    for (auto peak : peaks) {
-      reflection.addPeak(peak);
-    }
-
-    std::vector<double> intensities = reflection.getIntensities();
-    TSM_ASSERT_EQUALS("Intensity vector from UniqueReflection has wrong size.",
-                      intensities.size(), 2);
-    TS_ASSERT_EQUALS(intensities[0], 30.0);
-    TS_ASSERT_EQUALS(intensities[1], 34.0);
-
-    std::vector<double> sigmas = reflection.getSigmas();
-    TSM_ASSERT_EQUALS("Sigma vector from UniqueReflection has wrong size.",
-                      sigmas.size(), 2);
-    TS_ASSERT_EQUALS(sigmas[0], 4.5);
-    TS_ASSERT_EQUALS(sigmas[1], 6.5);
-  }
-
-  void test_UniqueReflectionRemoveOutliersSigmaCrit() {
-    UniqueReflection reflection(V3D(2, 3, 4));
-    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers(3.0));
-    TS_ASSERT_THROWS(reflection.removeOutliers(0.0), std::invalid_argument);
-    TS_ASSERT_THROWS(reflection.removeOutliers(-10.0), std::invalid_argument);
-  }
-
-  void test_UniqueReflectionRemoveOutliersFewPeaks() {
-    std::vector<Peak> peaks = getPeaksWithIandSigma({30.0, 34.0}, {4.5, 6.5});
-
-    UniqueReflection reflection(V3D(2, 3, 4));
-    reflection.addPeak(peaks[0]);
-
-    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers());
-    TSM_ASSERT_EQUALS("Peak was removed as outlier although there's only 1.",
-                      reflection.count(), 1);
-
-    reflection.addPeak(peaks[1]);
-
-    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers());
-    TSM_ASSERT_EQUALS("Peak was removed as outlier although there's only 2.",
-                      reflection.count(), 2);
-  }
-
-  void test_UniqueReflectionRemoveOutliers() {
-    UniqueReflection reflection =
-        getReflectionWithPeaks({30.0, 34.0, 32.0, 31.0}, {4.5, 6.5, 10.0, 2.3});
-
-    // standard deviation is 1.70782512765993
-    reflection.removeOutliers();
-    TSM_ASSERT_EQUALS(
-        "UniqueReflection removed outlier although it should not.",
-        reflection.count(), 4);
-
-    reflection.removeOutliers(2.0);
-    TSM_ASSERT_EQUALS(
-        "UniqueReflection removed outlier although it should not.",
-        reflection.count(), 4);
-
-    reflection.removeOutliers(1.0);
-    TSM_ASSERT_EQUALS(
-        "UniqueReflection did not remove outliers although it should have.",
-        reflection.count(), 2);
-
-    std::vector<double> cleanIntensities = reflection.getIntensities();
-    TS_ASSERT_EQUALS(cleanIntensities[0], 32.0);
-    TS_ASSERT_EQUALS(cleanIntensities[1], 31.0);
-  }
-
-  void test_UniqueReflectionSetIntensityAndSigma() {
-    UniqueReflection reflection =
-        getReflectionWithPeaks({30.0, 34.0, 32.0, 31.0}, {4.5, 6.5, 10.0, 2.3});
-
-    reflection.setPeaksIntensityAndSigma(10.0, 0.1);
-
-    for (auto peak : reflection.getPeaks()) {
-      TSM_ASSERT_EQUALS(
-          "Incorrect peak intensity after set in UniqueReflection.",
-          peak.getIntensity(), 10.0);
-      TSM_ASSERT_EQUALS("Incorrect peak sigma after set in UniqueReflection.",
-                        peak.getSigmaIntensity(), 0.1);
-    }
-  }
-
-  void test_PeaksStatisticsNoObservation() {
-    std::map<V3D, UniqueReflection> uniques;
-    uniques.insert(
-        std::make_pair(V3D(1, 1, 1), UniqueReflection(V3D(1, 1, 1))));
-
-    PeaksStatistics statistics(uniques, 0);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 0);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 0);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 0.0);
-  }
-
-  void test_PeaksStatisticsOneObservation() {
-    std::map<V3D, UniqueReflection> uniques{
-        {{1, 1, 1}, getReflectionWithPeaks({56.0}, {4.5}, 1.0)}};
-
-    PeaksStatistics statistics(uniques, 1);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 1);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 56.0 / 4.5);
-  }
-
-  void test_PeaksStatisticsOneObservationTwoUnique() {
-    std::map<V3D, UniqueReflection> uniques{
-        {{1, 1, 1}, getReflectionWithPeaks({56.0}, {4.5}, 1.0)},
-        {{1, 1, 2}, UniqueReflection(V3D(1, 1, 2))}};
-
-    PeaksStatistics statistics(uniques, 1);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 1);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 0.5);
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 56.0 / 4.5);
-  }
-
-  void test_PeaksStatisticsTwoObservationTwoUnique() {
-
-    std::map<V3D, UniqueReflection> uniques{
-        {{1, 1, 1}, getReflectionWithPeaks({10.0}, {1.0}, 1.0)},
-        {{1, 1, 2}, getReflectionWithPeaks({20.0}, {1.0}, 2.0)}};
-
-    PeaksStatistics statistics(uniques, 2);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 2);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 2);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 15.0);
-  }
-
-  void test_PeaksStatisticsTwoObservationOneUnique() {
-    std::map<V3D, UniqueReflection> uniques{
-        {{1, 1, 1}, getReflectionWithPeaks({10.0, 20.0}, {0.1, 0.1}, 1.0)}};
-
-    PeaksStatistics statistics(uniques, 2);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 2);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 2.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
-    // <I> = 15, sum(I) = 30, sum(|I - <I>|) = 10, rMerge = 10 / 30 = 0.33
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 1.0 / 3.0);
-    // For 2 observations this is the same since sqrt(1 / (2 - 1)) = 1
-    TS_ASSERT_EQUALS(statistics.m_rPim, 1.0 / 3.0);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 150.0);
-  }
-
-  void test_PeaksStatisticsThreeObservationOneUnique() {
-    std::map<V3D, UniqueReflection> uniques{
-        {{1, 1, 1},
-         getReflectionWithPeaks({10.0, 20.0, 15.0}, {0.1, 0.1, 0.1}, 1.0)}};
-
-    PeaksStatistics statistics(uniques, 3);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 3);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 3.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
-    // <I> = 15, sum(I) = 45, sum(|I - <I>|) = 10, rMerge = 10 / 45 = 0.222
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 1.0 / 4.5);
-    // For rpim the factor is  sqrt(1 / (3 - 1)) = sqrt(0.5)
-    TS_ASSERT_EQUALS(statistics.m_rPim, sqrt(0.5) / 4.5);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 150.0);
-  }
-
   void test_Init() {
     SortHKL alg;
     TS_ASSERT_THROWS_NOTHING(alg.initialize());
@@ -327,43 +126,6 @@ public:
 
   /// Test with a few peaks
   void test_exec() { do_test(2, 4, 4); }
-
-private:
-  std::vector<Peak>
-  getPeaksWithIandSigma(const std::vector<double> &intensity,
-                        const std::vector<double> &sigma) const {
-    std::vector<Peak> peaks;
-    std::transform(intensity.begin(), intensity.end(), sigma.begin(),
-                   std::back_inserter(peaks),
-                   [](double intensity, double sigma) {
-                     Peak peak;
-                     peak.setIntensity(intensity);
-                     peak.setSigmaIntensity(sigma);
-                     return peak;
-                   });
-
-    return peaks;
-  }
-
-  UniqueReflection
-  getReflectionWithPeaks(const std::vector<double> &intensities,
-                         const std::vector<double> &sigmas,
-                         double wavelength = 0.0) const {
-    std::vector<Peak> peaks = getPeaksWithIandSigma(intensities, sigmas);
-
-    if (wavelength > 0) {
-      for (auto &peak : peaks) {
-        peak.setWavelength(wavelength);
-      }
-    }
-
-    UniqueReflection reflection(V3D(2, 3, 4));
-    for (auto peak : peaks) {
-      reflection.addPeak(peak);
-    }
-
-    return reflection;
-  }
 };
 
 #endif /* MANTID_CRYSTAL_SORTHKLTEST_H_ */
diff --git a/Framework/CurveFitting/CMakeLists.txt b/Framework/CurveFitting/CMakeLists.txt
index 6716713c08b5f92af31e8135cb424e5d160b208a..ebd31ccae7f6e20d6e58389c813ebb9a7b355bfb 100644
--- a/Framework/CurveFitting/CMakeLists.txt
+++ b/Framework/CurveFitting/CMakeLists.txt
@@ -81,7 +81,6 @@ set ( SRC_FILES
 	src/Functions/ExpDecayOsc.cpp
 	src/Functions/FlatBackground.cpp
 	src/Functions/FullprofPolynomial.cpp
-	src/Functions/FunctionGenerator.cpp
 	src/Functions/FunctionQDepends.cpp
 	src/Functions/GausDecay.cpp
 	src/Functions/GausOsc.cpp
@@ -238,7 +237,6 @@ set ( INC_FILES
 	inc/MantidCurveFitting/Functions/ExpDecayOsc.h
 	inc/MantidCurveFitting/Functions/FlatBackground.h
 	inc/MantidCurveFitting/Functions/FullprofPolynomial.h
-	inc/MantidCurveFitting/Functions/FunctionGenerator.h
 	inc/MantidCurveFitting/Functions/FunctionQDepends.h
 	inc/MantidCurveFitting/Functions/GausDecay.h
 	inc/MantidCurveFitting/Functions/GausOsc.h
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Algorithms/Fit.h b/Framework/CurveFitting/inc/MantidCurveFitting/Algorithms/Fit.h
index 430d68a1a4505925459feb341ab81840bc6cf57e..d90df8c1efbc63334a0c55152df152da55bfe236 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Algorithms/Fit.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Algorithms/Fit.h
@@ -98,21 +98,31 @@ Code Documentation is available at: <http://doxygen.mantidproject.org>
 class DLLExport Fit : public IFittingAlgorithm {
 public:
   /// Default constructor
-  Fit() : IFittingAlgorithm() {}
+  Fit();
   /// Algorithm's name for identification overriding a virtual method
   const std::string name() const override { return "Fit"; }
   /// Summary of algorithms purpose
   const std::string summary() const override {
     return "Fits a function to data in a Workspace";
   }
-
   /// Algorithm's version for identification overriding a virtual method
   int version() const override { return (1); }
 
-protected:
+private:
   void initConcrete() override;
   void execConcrete() override;
+  void readProperties();
+  void initializeMinimizer(size_t maxIterations);
+  size_t runMinimizer();
+  void finalizeMinimizer(size_t nIterations);
   void copyMinimizerOutput(const API::IFuncMinimizer &minimizer);
+  void createOutput();
+  /// The cost function
+  boost::shared_ptr<CostFunctions::CostFuncFitting> m_costFunction;
+  /// The minimizer
+  boost::shared_ptr<API::IFuncMinimizer> m_minimizer;
+  /// Max number of iterations
+  size_t m_maxIterations;
 };
 
 } // namespace Algorithms
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Constraints/BoundaryConstraint.h b/Framework/CurveFitting/inc/MantidCurveFitting/Constraints/BoundaryConstraint.h
index 83fc5cc4d938188aba69b182d49b30bbc0f5673c..513e2a9e11fad166584000cbcfde41ecf9152023 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Constraints/BoundaryConstraint.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Constraints/BoundaryConstraint.h
@@ -44,15 +44,10 @@ Code Documentation is available at: <http://doxygen.mantidproject.org>
 class DLLExport BoundaryConstraint : public API::IConstraint {
 public:
   /// Default constructor
-  BoundaryConstraint()
-      : API::IConstraint(), m_penaltyFactor(1000.0), m_parameterName(""),
-        m_hasLowerBound(false), m_hasUpperBound(false), m_lowerBound(DBL_MAX),
-        m_upperBound(-DBL_MAX) {}
+  BoundaryConstraint();
 
   /// Constructor with no boundary arguments
-  BoundaryConstraint(const std::string &paramName)
-      : API::IConstraint(), m_penaltyFactor(1000.0), m_parameterName(paramName),
-        m_hasLowerBound(false), m_hasUpperBound(false) {}
+  BoundaryConstraint(const std::string &paramName);
 
   /// Constructor with boundary arguments
   BoundaryConstraint(API::IFunction *fun, const std::string paramName,
@@ -114,7 +109,7 @@ public:
   }
 
   /// Get parameter name
-  std::string getParameterName() const { return m_parameterName; }
+  //  std::string getParameterName() const { return m_parameterName; }
 
   /// overwrite IConstraint base class methods
   double check() override;
@@ -128,7 +123,7 @@ private:
   double m_penaltyFactor;
 
   /// name of the parameter you want to constrain
-  std::string m_parameterName;
+  // std::string m_parameterName;
 
   /// has a lower bound set true/false
   bool m_hasLowerBound;
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldMultiSpectrum.h b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldMultiSpectrum.h
index 328fb8ee636276bd702329613de90214c77c91db..b7d8e824708ddadb36d86965a81f50cf9494da85 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldMultiSpectrum.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldMultiSpectrum.h
@@ -1,9 +1,9 @@
 #ifndef MANTID_CURVEFITTING_CRYSTALFIELDMULTISPECTRUM_H_
 #define MANTID_CURVEFITTING_CRYSTALFIELDMULTISPECTRUM_H_
 
+#include "MantidAPI/FunctionGenerator.h"
 #include "MantidAPI/FunctionValues.h"
 #include "MantidCurveFitting/FortranDefs.h"
-#include "MantidCurveFitting/Functions/FunctionGenerator.h"
 
 namespace Mantid {
 namespace CurveFitting {
@@ -32,7 +32,7 @@ along with this program.  If not, see <http://www.gnu.org/licenses/>.
 File change history is stored at: <https://github.com/mantidproject/mantid>
 Code Documentation is available at: <http://doxygen.mantidproject.org>
 */
-class DLLExport CrystalFieldMultiSpectrum : public FunctionGenerator {
+class DLLExport CrystalFieldMultiSpectrum : public API::FunctionGenerator {
 public:
   CrystalFieldMultiSpectrum();
   std::string name() const override { return "CrystalFieldMultiSpectrum"; }
@@ -66,7 +66,7 @@ private:
                       const DoubleFortranVector &en,
                       const ComplexFortranMatrix &wf,
                       const ComplexFortranMatrix &ham, double temperature,
-                      size_t i) const;
+                      double fwhm, size_t i) const;
   /// Calculate excitations at given temperature
   void calcExcitations(int nre, const DoubleFortranVector &en,
                        const ComplexFortranMatrix &wf, double temperature,
@@ -78,6 +78,10 @@ private:
   /// Caches of the width functions
   mutable std::vector<std::vector<double>> m_fwhmX;
   mutable std::vector<std::vector<double>> m_fwhmY;
+  /// Cache the temperatures
+  mutable std::vector<double> m_temperatures;
+  /// Cache the default peak FWHMs
+  mutable std::vector<double> m_FWHMs;
 };
 
 } // namespace Functions
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldPeakUtils.h b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldPeakUtils.h
index 395b0cd61b765164f5ec00dd5598ed532873a695..c35d797f4c07baef958295c2e28c0a9efb71380f 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldPeakUtils.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldPeakUtils.h
@@ -46,11 +46,12 @@ size_t buildSpectrumFunction(API::CompositeFunction &spectrum,
                              double fwhmVariation, double defaultFWHM,
                              size_t nRequiredPeaks, bool fixAllPeaks);
 size_t updateSpectrumFunction(API::CompositeFunction &spectrum,
+                              const std::string &peakShape,
                               const API::FunctionValues &centresAndIntensities,
-                              size_t nOriginalPeaks, size_t iFirst,
-                              const std::vector<double> &xVec,
+                              size_t iFirst, const std::vector<double> &xVec,
                               const std::vector<double> &yVec,
-                              double fwhmVariation);
+                              double fwhmVariation, double defaultFWHM,
+                              bool fixAllPeaks);
 size_t calculateNPeaks(const API::FunctionValues &centresAndIntensities);
 size_t calculateMaxNPeaks(size_t nPeaks);
 
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldSpectrum.h b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldSpectrum.h
index b502e5b7e97ce3b0d715e78fc8a7fb0ffc9b9a28..bf96865210adbc3b95acd740951f30e96456aec8 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldSpectrum.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldSpectrum.h
@@ -1,7 +1,7 @@
 #ifndef MANTID_CURVEFITTING_CRYSTALFIELDSPECTRUM_H_
 #define MANTID_CURVEFITTING_CRYSTALFIELDSPECTRUM_H_
 
-#include "MantidCurveFitting/Functions/FunctionGenerator.h"
+#include "MantidAPI/FunctionGenerator.h"
 
 namespace Mantid {
 namespace CurveFitting {
@@ -30,7 +30,7 @@ along with this program.  If not, see <http://www.gnu.org/licenses/>.
 File change history is stored at: <https://github.com/mantidproject/mantid>
 Code Documentation is available at: <http://doxygen.mantidproject.org>
 */
-class DLLExport CrystalFieldSpectrum : public FunctionGenerator {
+class DLLExport CrystalFieldSpectrum : public API::FunctionGenerator {
 public:
   CrystalFieldSpectrum();
   std::string name() const override { return "CrystalFieldSpectrum"; }
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Gaussian.h b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Gaussian.h
index c09f2e9a7bb050bdca7ecf1518899d50236e2050..e4478f8b75f318c251af28f2bf57618db20f0d71 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Gaussian.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Gaussian.h
@@ -61,9 +61,9 @@ public:
   void setFwhm(const double w) override;
   void setIntensity(const double i) override;
 
-  void fixCentre() override;
+  void fixCentre(bool isDefault = false) override;
   void unfixCentre() override;
-  void fixIntensity() override;
+  void fixIntensity(bool isDefault = false) override;
   void unfixIntensity() override;
 
   /// overwrite IFunction base class methods
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Lorentzian.h b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Lorentzian.h
index 2a6d0e5975f729c3937b153f94de8a381e39ed8f..fdd10e2f6aa49fcbb2aa4a08273512121c932120 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Lorentzian.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Lorentzian.h
@@ -56,9 +56,9 @@ public:
   void setHeight(const double h) override;
   void setFwhm(const double w) override;
   void setIntensity(const double i) override { setParameter("Amplitude", i); }
-  void fixCentre() override;
+  void fixCentre(bool isDefault = false) override;
   void unfixCentre() override;
-  void fixIntensity() override;
+  void fixIntensity(bool isDefault = false) override;
   void unfixIntensity() override;
 
   /// overwrite IFunction base class methods
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Jacobian.h b/Framework/CurveFitting/inc/MantidCurveFitting/Jacobian.h
index 9fbf274107936a7ad7fefadd7ccfa286911ee7d8..09730cd87e0c4896a2b704dbcaecdfc4599228ac 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Jacobian.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Jacobian.h
@@ -2,6 +2,7 @@
 #define MANTID_CURVEFITTING_GSLFUNCTIONS_H_
 
 #include "MantidAPI/Jacobian.h"
+#include "MantidKernel/Exception.h"
 #include <gsl/gsl_matrix.h>
 
 #include <vector>
@@ -74,7 +75,7 @@ public:
       throw std::out_of_range("Data index in Jacobian is out of range");
     }
     if (iP >= m_np) {
-      throw std::out_of_range("Parameter index in Jacobian is out of range");
+      throw Kernel::Exception::FitSizeWarning(m_np);
     }
     m_data[iY * m_np + iP] = value;
   }
@@ -84,7 +85,7 @@ public:
       throw std::out_of_range("Data index in Jacobian is out of range");
     }
     if (iP >= m_np) {
-      throw std::out_of_range("Parameter index in Jacobian is out of range");
+      throw Kernel::Exception::FitSizeWarning(m_np);
     }
     return m_data[iY * m_np + iP];
   }
diff --git a/Framework/CurveFitting/src/Algorithms/CalculateChiSquared.cpp b/Framework/CurveFitting/src/Algorithms/CalculateChiSquared.cpp
index 8dcd276acc41e5a3ae014d50d2680278d43057cc..25a5738c573e6e8b77c74aae6a309e03785c9846 100644
--- a/Framework/CurveFitting/src/Algorithms/CalculateChiSquared.cpp
+++ b/Framework/CurveFitting/src/Algorithms/CalculateChiSquared.cpp
@@ -117,7 +117,7 @@ void CalculateChiSquared::execConcrete() {
   // Get the number of free fitting parameters
   size_t nParams = 0;
   for (size_t i = 0; i < m_function->nParams(); ++i) {
-    if (!m_function->isFixed(i))
+    if (m_function->isActive(i))
       nParams += 1;
   }
 
@@ -658,7 +658,7 @@ void CalculateChiSquared::estimateErrors() {
 /// Temporarily unfix any fixed parameters.
 void CalculateChiSquared::unfixParameters() {
   for (size_t i = 0; i < m_function->nParams(); ++i) {
-    if (m_function->isFixed(i)) {
+    if (!m_function->isActive(i)) {
       m_function->unfix(i);
       m_fixedParameters.push_back(i);
     }
diff --git a/Framework/CurveFitting/src/Algorithms/EstimateFitParameters.cpp b/Framework/CurveFitting/src/Algorithms/EstimateFitParameters.cpp
index 680aa066e296164a201a07e47cdfa715bd91bb87..2485a0cc1882e1119827512e8f49dd2109663eb1 100644
--- a/Framework/CurveFitting/src/Algorithms/EstimateFitParameters.cpp
+++ b/Framework/CurveFitting/src/Algorithms/EstimateFitParameters.cpp
@@ -65,7 +65,7 @@ void fixBadParameters(CostFunctions::CostFuncFitting &costFunction,
   std::vector<double> P, A, D;
   auto &fun = *costFunction.getFittingFunction();
   for (size_t i = 0, j = 0; i < fun.nParams(); ++i) {
-    if (fun.isFixed(i)) {
+    if (!fun.isActive(i)) {
       continue;
     }
     auto lBound = ranges[j].first;
@@ -372,7 +372,7 @@ void EstimateFitParameters::execConcrete() {
   std::vector<std::pair<double, double>> ranges;
   ranges.reserve(costFunction->nParams());
   for (size_t i = 0; i < func->nParams(); ++i) {
-    if (func->isFixed(i)) {
+    if (!func->isActive(i)) {
       continue;
     }
     auto constraint = func->getConstraint(i);
@@ -427,7 +427,7 @@ void EstimateFitParameters::execConcrete() {
       }
 
       for (size_t i = 0, ia = 0; i < m_function->nParams(); ++i) {
-        if (!m_function->isFixed(i)) {
+        if (m_function->isActive(i)) {
           TableRow row = table->appendRow();
           row << m_function->parameterName(i);
           for (size_t j = 0; j < output.size(); ++j) {
diff --git a/Framework/CurveFitting/src/Algorithms/Fit.cpp b/Framework/CurveFitting/src/Algorithms/Fit.cpp
index f9341b161e864f711dbd3e75f82c1d7083e7452e..112646cdac41be804e415309eb3aaf1c95c4278c 100644
--- a/Framework/CurveFitting/src/Algorithms/Fit.cpp
+++ b/Framework/CurveFitting/src/Algorithms/Fit.cpp
@@ -4,6 +4,7 @@
 #include "MantidCurveFitting/Algorithms/Fit.h"
 #include "MantidCurveFitting/CostFunctions/CostFuncFitting.h"
 
+#include "MantidAPI/CompositeFunction.h"
 #include "MantidAPI/FuncMinimizerFactory.h"
 #include "MantidAPI/IFuncMinimizer.h"
 #include "MantidAPI/ITableWorkspace.h"
@@ -12,8 +13,11 @@
 #include "MantidAPI/WorkspaceFactory.h"
 
 #include "MantidKernel/BoundedValidator.h"
+#include "MantidKernel/Exception.h"
 #include "MantidKernel/StartsWithValidator.h"
 
+#include <boost/make_shared.hpp>
+
 namespace Mantid {
 namespace CurveFitting {
 namespace Algorithms {
@@ -21,6 +25,9 @@ namespace Algorithms {
 // Register the class into the algorithm factory
 DECLARE_ALGORITHM(Fit)
 
+/// Default constructor
+Fit::Fit() : IFittingAlgorithm(), m_maxIterations() {}
+
 /** Initialisation method
 */
 void Fit::initConcrete() {
@@ -97,6 +104,32 @@ void Fit::initConcrete() {
                   "Output is an empty string).");
 }
 
+/// Read in the properties specific to Fit.
+void Fit::readProperties() {
+  std::string ties = getPropertyValue("Ties");
+  if (!ties.empty()) {
+    m_function->addTies(ties);
+  }
+  std::string constraints = getPropertyValue("Constraints");
+  if (!constraints.empty()) {
+    m_function->addConstraints(constraints);
+  }
+
+  // Try to retrieve optional properties
+  int intMaxIterations = getProperty("MaxIterations");
+  m_maxIterations = static_cast<size_t>(intMaxIterations);
+}
+
+/// Initialize the minimizer for this fit.
+/// @param maxIterations :: Maximum number of iterations.
+void Fit::initializeMinimizer(size_t maxIterations) {
+  m_costFunction = getCostFunctionInitialized();
+  std::string minimizerName = getPropertyValue("Minimizer");
+  m_minimizer =
+      API::FuncMinimizerFactory::Instance().createMinimizer(minimizerName);
+  m_minimizer->initialize(m_costFunction, maxIterations);
+}
+
 /**
   * Copy all output workspace properties from the minimizer to Fit algorithm.
   * @param minimizer :: The minimizer to copy from.
@@ -113,89 +146,88 @@ void Fit::copyMinimizerOutput(const API::IFuncMinimizer &minimizer) {
   }
 }
 
-/** Executes the algorithm
-*
-*  @throw runtime_error Thrown if algorithm cannot execute
-*/
-void Fit::execConcrete() {
-
-  std::string ties = getPropertyValue("Ties");
-  if (!ties.empty()) {
-    m_function->addTies(ties);
-  }
-  std::string contstraints = getPropertyValue("Constraints");
-  if (!contstraints.empty()) {
-    m_function->addConstraints(contstraints);
-  }
-
-  auto costFunc = getCostFunctionInitialized();
-
-  // Try to retrieve optional properties
-  int intMaxIterations = getProperty("MaxIterations");
-  const size_t maxIterations = static_cast<size_t>(intMaxIterations);
-
-  // get the minimizer
-  std::string minimizerName = getPropertyValue("Minimizer");
-  API::IFuncMinimizer_sptr minimizer =
-      API::FuncMinimizerFactory::Instance().createMinimizer(minimizerName);
-  minimizer->initialize(costFunc, maxIterations);
-
-  const int64_t nsteps = maxIterations * m_function->estimateNoProgressCalls();
-  API::Progress prog(this, 0.0, 1.0, nsteps);
-  m_function->setProgressReporter(&prog);
+/// Run the minimizer's iteration loop.
+/// @returns :: Number of actual iterations.
+size_t Fit::runMinimizer() {
+  const int64_t nsteps =
+      m_maxIterations * m_function->estimateNoProgressCalls();
+  auto prog = boost::make_shared<API::Progress>(this, 0.0, 1.0, nsteps);
+  m_function->setProgressReporter(prog);
 
   // do the fitting until success or iteration limit is reached
   size_t iter = 0;
-  bool success = false;
-  std::string errorString;
+  bool isFinished = false;
   g_log.debug("Starting minimizer iteration\n");
-  while (iter < maxIterations) {
+  while (iter < m_maxIterations) {
     g_log.debug() << "Starting iteration " << iter << "\n";
-    m_function->iterationStarting();
-    if (!minimizer->iterate(iter)) {
-      errorString = minimizer->getError();
-      g_log.debug() << "Iteration stopped. Minimizer status string="
-                    << errorString << "\n";
-
-      success = errorString.empty() || errorString == "success";
-      if (success) {
-        errorString = "success";
+    try {
+      // Perform a single iteration. isFinished is set when the minimizer
+      // wants to quit.
+      m_function->iterationStarting();
+      isFinished = !m_minimizer->iterate(iter);
+      m_function->iterationFinished();
+    } catch (Kernel::Exception::FitSizeWarning &) {
+      // This is an attempt to recover after the function changes its number of
+      // parameters or ties during the iteration.
+      if (auto cf = dynamic_cast<API::CompositeFunction *>(m_function.get())) {
+        // Make sure the composite function is valid.
+        cf->checkFunction();
       }
+      // Re-create the cost function and minimizer.
+      initializeMinimizer(m_maxIterations - iter);
+    }
+
+    prog->report();
+
+    if (isFinished) {
+      // It was the last iteration. Break out of the loop and return the number
+      // of finished iterations.
       break;
     }
-    prog.report();
-    m_function->iterationFinished();
     ++iter;
   }
   g_log.debug() << "Number of minimizer iterations=" << iter << "\n";
+  return iter;
+}
+
+/// Finalize the minimizer.
+/// @param nIterations :: The actual number of iterations done by the minimizer.
+void Fit::finalizeMinimizer(size_t nIterations) {
+  m_minimizer->finalize();
 
-  minimizer->finalize();
+  auto errorString = m_minimizer->getError();
+  g_log.debug() << "Iteration stopped. Minimizer status string=" << errorString
+                << "\n";
 
-  if (iter >= maxIterations) {
+  bool success = errorString.empty() || errorString == "success";
+  if (success) {
+    errorString = "success";
+  }
+
+  if (nIterations >= m_maxIterations) {
     if (!errorString.empty()) {
       errorString += '\n';
     }
-    errorString += "Failed to converge after " + std::to_string(maxIterations) +
-                   " iterations.";
+    errorString += "Failed to converge after " +
+                   std::to_string(m_maxIterations) + " iterations.";
   }
 
   // return the status flag
   setPropertyValue("OutputStatus", errorString);
+}
+
+/// Create the algorithm output workspaces.
+void Fit::createOutput() {
 
   // degrees of freedom
-  size_t dof = costFunc->getDomain()->size() - costFunc->nParams();
+  size_t dof = m_costFunction->getDomain()->size() - m_costFunction->nParams();
   if (dof == 0)
     dof = 1;
-  double rawcostfuncval = minimizer->costFunctionVal();
+  double rawcostfuncval = m_minimizer->costFunctionVal();
   double finalCostFuncVal = rawcostfuncval / double(dof);
 
   setProperty("OutputChi2overDoF", finalCostFuncVal);
 
-  // fit ended, creating output
-
-  // get the workspace
-  API::Workspace_const_sptr ws = getProperty("InputWorkspace");
-
   bool doCreateOutput = getProperty("CreateOutput");
   std::string baseName = getPropertyValue("Output");
   if (!baseName.empty()) {
@@ -205,19 +237,22 @@ void Fit::execConcrete() {
   if (doCreateOutput) {
     doCalcErrors = true;
   }
-  if (costFunc->nParams() == 0) {
+  if (m_costFunction->nParams() == 0) {
     doCalcErrors = false;
   }
 
   GSLMatrix covar;
   if (doCalcErrors) {
     // Calculate the covariance matrix and the errors.
-    costFunc->calCovarianceMatrix(covar);
-    costFunc->calFittingErrors(covar, rawcostfuncval);
+    m_costFunction->calCovarianceMatrix(covar);
+    m_costFunction->calFittingErrors(covar, rawcostfuncval);
   }
 
   if (doCreateOutput) {
-    copyMinimizerOutput(*minimizer);
+    copyMinimizerOutput(*m_minimizer);
+
+    // get the workspace
+    API::Workspace_const_sptr ws = getProperty("InputWorkspace");
 
     if (baseName.empty()) {
       baseName = ws->getName();
@@ -240,25 +275,22 @@ void Fit::execConcrete() {
     covariance->addColumn("str", "Name");
     // set plot type to Label = 6
     covariance->getColumn(covariance->columnCount() - 1)->setPlotType(6);
-    // std::vector<std::string> paramThatAreFitted; // used for populating 1st
-    // "name" column
     for (size_t i = 0; i < m_function->nParams(); i++) {
       if (m_function->isActive(i)) {
         covariance->addColumn("double", m_function->parameterName(i));
-        // paramThatAreFitted.push_back(m_function->parameterName(i));
       }
     }
 
     size_t np = m_function->nParams();
     size_t ia = 0;
     for (size_t i = 0; i < np; i++) {
-      if (m_function->isFixed(i))
+      if (!m_function->isActive(i))
         continue;
       Mantid::API::TableRow row = covariance->appendRow();
       row << m_function->parameterName(i);
       size_t ja = 0;
       for (size_t j = 0; j < np; j++) {
-        if (m_function->isFixed(j))
+        if (!m_function->isActive(j))
           continue;
         if (j == i)
           row << 100.0;
@@ -307,23 +339,14 @@ void Fit::execConcrete() {
     }
     // Add chi-squared value at the end of parameter table
     Mantid::API::TableRow row = result->appendRow();
-#if 1
+
     std::string costfuncname = getPropertyValue("CostFunction");
     if (costfuncname == "Rwp")
       row << "Cost function value" << rawcostfuncval;
     else
       row << "Cost function value" << finalCostFuncVal;
-    setProperty("OutputParameters", result);
-#else
-    row << "Cost function value" << finalCostFuncVal;
-    Mantid::API::TableRow row2 = result->appendRow();
-    std::string name(getPropertyValue("CostFunction"));
-    name += " value";
-    row2 << name << rawcostfuncval;
-#endif
 
     setProperty("OutputParameters", result);
-
     bool outputParametersOnly = getProperty("OutputParametersOnly");
 
     if (!outputParametersOnly) {
@@ -334,10 +357,33 @@ void Fit::execConcrete() {
       }
       m_domainCreator->separateCompositeMembersInOutput(unrollComposites,
                                                         convolveMembers);
-      m_domainCreator->createOutputWorkspace(
-          baseName, m_function, costFunc->getDomain(), costFunc->getValues());
+      m_domainCreator->createOutputWorkspace(baseName, m_function,
+                                             m_costFunction->getDomain(),
+                                             m_costFunction->getValues());
     }
   }
+}
+
+/** Executes the algorithm
+*
+*  @throw runtime_error Thrown if algorithm cannot execute
+*/
+void Fit::execConcrete() {
+
+  // Read Fit's own properties
+  readProperties();
+
+  // Get the minimizer
+  initializeMinimizer(m_maxIterations);
+
+  // Run the minimizer
+  auto nIterations = runMinimizer();
+
+  // Finalize the minimizer.
+  finalizeMinimizer(nIterations);
+
+  // fit ended, creating output
+  createOutput();
 
   progress(1.0);
 }
diff --git a/Framework/CurveFitting/src/Algorithms/FitPowderDiffPeaks.cpp b/Framework/CurveFitting/src/Algorithms/FitPowderDiffPeaks.cpp
index c4271f59cdfb37eb93ad3201f8ca5564561dfe23..7671a6f74f8202c9a4ffad876d660c30d76fb00c 100644
--- a/Framework/CurveFitting/src/Algorithms/FitPowderDiffPeaks.cpp
+++ b/Framework/CurveFitting/src/Algorithms/FitPowderDiffPeaks.cpp
@@ -3102,7 +3102,7 @@ string getFunctionInfo(IFunction_sptr function) {
   outss << "Number of Parameters = " << numpars << '\n';
   for (size_t i = 0; i < numpars; ++i)
     outss << parnames[i] << " = " << function->getParameter(i)
-          << ", \t\tFitted = " << !function->isFixed(i) << '\n';
+          << ", \t\tFitted = " << function->isActive(i) << '\n';
 
   return outss.str();
 }
diff --git a/Framework/CurveFitting/src/Algorithms/RefinePowderInstrumentParameters3.cpp b/Framework/CurveFitting/src/Algorithms/RefinePowderInstrumentParameters3.cpp
index 9c9fe229f0f55b6617e23e5ad150fd7da6315ef7..ae0a4d084156beec33f401813a0fdc83445f44b0 100644
--- a/Framework/CurveFitting/src/Algorithms/RefinePowderInstrumentParameters3.cpp
+++ b/Framework/CurveFitting/src/Algorithms/RefinePowderInstrumentParameters3.cpp
@@ -822,7 +822,7 @@ double RefinePowderInstrumentParameters3::calculateFunctionError(
   vector<bool> vecFix(parnames.size(), false);
 
   for (size_t i = 0; i < parnames.size(); ++i) {
-    bool fixed = function->isFixed(i);
+    bool fixed = !function->isActive(i);
     vecFix[i] = fixed;
     if (!fixed)
       function->fix(i);
diff --git a/Framework/CurveFitting/src/Constraints/BoundaryConstraint.cpp b/Framework/CurveFitting/src/Constraints/BoundaryConstraint.cpp
index 86f3f52be2a7fe4549198c0a4926b5a0c3d1074a..823084b4ee9afccc8018d5e9dcf83d376c1d6ec8 100644
--- a/Framework/CurveFitting/src/Constraints/BoundaryConstraint.cpp
+++ b/Framework/CurveFitting/src/Constraints/BoundaryConstraint.cpp
@@ -4,9 +4,11 @@
 #include "MantidCurveFitting/Constraints/BoundaryConstraint.h"
 #include "MantidAPI/Expression.h"
 #include "MantidAPI/ConstraintFactory.h"
+#include "MantidAPI/IFunction.h"
 #include "MantidKernel/Logger.h"
 #include <boost/lexical_cast.hpp>
 #include <sstream>
+#include <iostream>
 
 namespace Mantid {
 namespace CurveFitting {
@@ -21,6 +23,19 @@ DECLARE_CONSTRAINT(BoundaryConstraint)
 // using namespace Kernel;
 using namespace API;
 
+/// Default constructor
+BoundaryConstraint::BoundaryConstraint()
+    : API::IConstraint(), m_penaltyFactor(1000.0), m_hasLowerBound(false),
+      m_hasUpperBound(false), m_lowerBound(DBL_MAX), m_upperBound(-DBL_MAX) {}
+
+/// Constructor with no boundary arguments
+/// @param paramName :: The parameter name
+BoundaryConstraint::BoundaryConstraint(const std::string &paramName)
+    : API::IConstraint(), m_penaltyFactor(1000.0), m_hasLowerBound(false),
+      m_hasUpperBound(false) {
+  UNUSED_ARG(paramName);
+}
+
 /** Constructor with boundary arguments
  * @param fun :: The function
  * @param paramName :: The parameter name
@@ -34,18 +49,16 @@ BoundaryConstraint::BoundaryConstraint(API::IFunction *fun,
                                        const std::string paramName,
                                        const double lowerBound,
                                        const double upperBound, bool isDefault)
-    : m_penaltyFactor(1000.0), m_parameterName(paramName),
-      m_hasLowerBound(true), m_hasUpperBound(true), m_lowerBound(lowerBound),
-      m_upperBound(upperBound) {
+    : m_penaltyFactor(1000.0), m_hasLowerBound(true), m_hasUpperBound(true),
+      m_lowerBound(lowerBound), m_upperBound(upperBound) {
   reset(fun, fun->parameterIndex(paramName), isDefault);
 }
 
 BoundaryConstraint::BoundaryConstraint(API::IFunction *fun,
                                        const std::string paramName,
                                        const double lowerBound, bool isDefault)
-    : m_penaltyFactor(1000.0), m_parameterName(paramName),
-      m_hasLowerBound(true), m_hasUpperBound(false), m_lowerBound(lowerBound),
-      m_upperBound(-DBL_MAX) {
+    : m_penaltyFactor(1000.0), m_hasLowerBound(true), m_hasUpperBound(false),
+      m_lowerBound(lowerBound), m_upperBound(-DBL_MAX) {
   reset(fun, fun->parameterIndex(paramName), isDefault);
 }
 
@@ -113,7 +126,6 @@ void BoundaryConstraint::initialize(API::IFunction *fun,
   try {
     size_t i = fun->parameterIndex(parName);
     reset(fun, i, isDefault);
-    m_parameterName = parName;
   } catch (...) {
     g_log.error() << "Parameter " << parName << " not found in function "
                   << fun->name() << '\n';
@@ -146,31 +158,29 @@ void BoundaryConstraint::setParamToSatisfyConstraint() {
   if (!(m_hasLowerBound || m_hasUpperBound)) {
     g_log.warning()
         << "No bounds have been set on BoundaryConstraint for parameter "
-        << m_parameterName << ". Therefore"
+        << parameterName() << ". Therefore"
         << " this constraint serves no purpose!";
     return;
   }
 
-  double paramValue = getFunction()->getParameter(getIndex());
+  double paramValue = getParameter();
 
-  if (m_hasLowerBound)
-    if (paramValue < m_lowerBound)
-      getFunction()->setParameter(getIndex(), m_lowerBound, false);
-  if (m_hasUpperBound)
-    if (paramValue > m_upperBound)
-      getFunction()->setParameter(getIndex(), m_upperBound, false);
+  if (m_hasLowerBound && paramValue < m_lowerBound)
+    setParameter(m_lowerBound, false);
+  if (m_hasUpperBound && paramValue > m_upperBound)
+    setParameter(m_upperBound, false);
 }
 
 double BoundaryConstraint::check() {
   if (!(m_hasLowerBound || m_hasUpperBound)) {
     g_log.warning()
         << "No bounds have been set on BoundaryConstraint for parameter "
-        << m_parameterName << ". Therefore"
+        << parameterName() << ". Therefore"
         << " this constraint serves no purpose!";
     return 0.0;
   }
 
-  double paramValue = getFunction()->getParameter(getIndex());
+  double paramValue = getParameter();
 
   double penalty = 0.0;
 
@@ -198,7 +208,7 @@ double BoundaryConstraint::checkDeriv() {
     return penalty;
   }
 
-  double paramValue = getFunction()->getParameter(getIndex());
+  double paramValue = getParameter();
 
   if (m_hasLowerBound)
     if (paramValue < m_lowerBound) {
@@ -224,7 +234,7 @@ double BoundaryConstraint::checkDeriv2() {
     return penalty;
   }
 
-  double paramValue = getFunction()->getParameter(getIndex());
+  double paramValue = getParameter();
 
   if (m_hasLowerBound)
     if (paramValue < m_lowerBound)
@@ -241,7 +251,7 @@ std::string BoundaryConstraint::asString() const {
   if (m_hasLowerBound) {
     ostr << m_lowerBound << '<';
   }
-  ostr << getFunction()->parameterName(getIndex());
+  ostr << parameterName();
   if (m_hasUpperBound) {
     ostr << '<' << m_upperBound;
   }
diff --git a/Framework/CurveFitting/src/CostFunctions/CostFuncFitting.cpp b/Framework/CurveFitting/src/CostFunctions/CostFuncFitting.cpp
index aaa5aef879b33277ea5059d91e9566aba6553c95..65423f52afde1892047b246890e9f20978767a95 100644
--- a/Framework/CurveFitting/src/CostFunctions/CostFuncFitting.cpp
+++ b/Framework/CurveFitting/src/CostFunctions/CostFuncFitting.cpp
@@ -4,6 +4,7 @@
 #include "MantidCurveFitting/CostFunctions/CostFuncFitting.h"
 #include "MantidCurveFitting/GSLJacobian.h"
 #include "MantidAPI/IConstraint.h"
+#include "MantidKernel/Exception.h"
 
 #include <gsl/gsl_multifit_nlin.h>
 #include <limits>
@@ -163,12 +164,12 @@ void CostFuncFitting::calFittingErrors(const GSLMatrix &covar, double chi2) {
       new Kernel::Matrix<double>(np, np));
   size_t ia = 0;
   for (size_t i = 0; i < np; ++i) {
-    if (m_function->isFixed(i)) {
+    if (!m_function->isActive(i)) {
       m_function->setError(i, 0);
     } else {
       size_t ja = 0;
       for (size_t j = 0; j < np; ++j) {
-        if (!m_function->isFixed(j)) {
+        if (m_function->isActive(j)) {
           (*covarMatrix)[i][j] = covar.get(ia, ja);
           ++ja;
         }
@@ -193,7 +194,7 @@ void CostFuncFitting::calTransformationMatrixNumerically(GSLMatrix &tm) {
   tm.resize(na, na);
   size_t ia = 0;
   for (size_t i = 0; i < np; ++i) {
-    if (m_function->isFixed(i))
+    if (!m_function->isActive(i))
       continue;
     double p0 = m_function->getParameter(i);
     for (size_t j = 0; j < na; ++j) {
@@ -236,11 +237,11 @@ void CostFuncFitting::reset() const {
  * @param params :: A vector to copy the parameters from
  */
 void CostFuncFitting::setParameters(const GSLVector &params) {
-  if (nParams() != params.size()) {
-    throw std::runtime_error(
-        "Parameter vector has wrong size in CostFuncLeastSquares.");
+  auto np = nParams();
+  if (np != params.size()) {
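+    // The fitting function may have changed its number of parameters during
+    // the iteration; report it as a recoverable FitSizeWarning.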
+    throw Kernel::Exception::FitSizeWarning(params.size(), np);
   }
-  for (size_t i = 0; i < nParams(); ++i) {
+  for (size_t i = 0; i < np; ++i) {
     setParameter(i, params.get(i));
   }
   m_function->applyTies();
@@ -251,10 +252,11 @@ void CostFuncFitting::setParameters(const GSLVector &params) {
  * @param params :: A vector to copy the parameters to
  */
 void CostFuncFitting::getParameters(GSLVector &params) const {
-  if (params.size() != nParams()) {
-    params.resize(nParams());
+  auto np = nParams();
+  if (params.size() != np) {
+    params.resize(np);
   }
-  for (size_t i = 0; i < nParams(); ++i) {
+  for (size_t i = 0; i < np; ++i) {
     params.set(i, getParameter(i));
   }
 }
diff --git a/Framework/CurveFitting/src/FuncMinimizers/LevenbergMarquardtMDMinimizer.cpp b/Framework/CurveFitting/src/FuncMinimizers/LevenbergMarquardtMDMinimizer.cpp
index 9e173b59b4e2e00bee7402d495b4473c2cd4d1d4..21f3835ec015550a0f10fe8d559310e1db8c77c7 100644
--- a/Framework/CurveFitting/src/FuncMinimizers/LevenbergMarquardtMDMinimizer.cpp
+++ b/Framework/CurveFitting/src/FuncMinimizers/LevenbergMarquardtMDMinimizer.cpp
@@ -177,11 +177,13 @@ bool LevenbergMarquardtMDMinimizer::iterate(size_t) {
   // save previous state
   m_leastSquares->push();
   // Update the parameters of the cost function.
-  for (size_t i = 0; i < n; ++i) {
-    double d = m_leastSquares->getParameter(i) + dx.get(i);
-    m_leastSquares->setParameter(i, d);
-    if (debug) {
-      g_log.warning() << "Parameter(" << i << ")=" << d << '\n';
+  GSLVector parameters(n);
+  m_leastSquares->getParameters(parameters);
+  parameters += dx;
+  m_leastSquares->setParameters(parameters);
+  if (debug) {
+    for (size_t i = 0; i < n; ++i) {
+      g_log.warning() << "Parameter(" << i << ")=" << parameters[i] << '\n';
     }
   }
   m_leastSquares->getFittingFunction()->applyTies();
diff --git a/Framework/CurveFitting/src/Functions/ComptonScatteringCountRate.cpp b/Framework/CurveFitting/src/Functions/ComptonScatteringCountRate.cpp
index 665f81cffca2e0b9dc4ff1522bd8756815856911..c8c203e55af27f2cb21351c049469fde7b45138a 100644
--- a/Framework/CurveFitting/src/Functions/ComptonScatteringCountRate.cpp
+++ b/Framework/CurveFitting/src/Functions/ComptonScatteringCountRate.cpp
@@ -312,7 +312,7 @@ void ComptonScatteringCountRate::cacheComptonProfile(
   auto fixedParams = profile->intensityParameterIndices();
   for (auto fixedParam : fixedParams) {
     const size_t indexOfFixed = paramsOffset + fixedParam;
-    this->fix(indexOfFixed);
+    this->setParameterStatus(indexOfFixed, Tied);
     m_fixedParamIndices.push_back(indexOfFixed);
   }
 }
@@ -330,12 +330,11 @@ void ComptonScatteringCountRate::cacheBackground(
     const size_t npars =
         static_cast<size_t>(m_bkgdPolyN + 1); // + constant term
     // we assume the parameters are at index 0->N on the background so we need
-    // to
-    // reverse them
+    // to reverse them
     for (size_t i = npars; i > 0; --i) // i = from npars->1
     {
       const size_t indexOfFixed = paramsOffset + (i - 1);
-      this->fix(indexOfFixed);
+      this->setParameterStatus(indexOfFixed, Tied);
       m_fixedParamIndices.push_back(indexOfFixed);
     }
   } else {
diff --git a/Framework/CurveFitting/src/Functions/CrystalFieldMultiSpectrum.cpp b/Framework/CurveFitting/src/Functions/CrystalFieldMultiSpectrum.cpp
index 62bda55ed9eb48d39d9e07a6d5b5ab4f66b1fbec..7dec77fd50fe295284bc82eff16b8b33c8432078 100644
--- a/Framework/CurveFitting/src/Functions/CrystalFieldMultiSpectrum.cpp
+++ b/Framework/CurveFitting/src/Functions/CrystalFieldMultiSpectrum.cpp
@@ -15,6 +15,7 @@
 #include "MantidAPI/ParameterTie.h"
 
 #include "MantidKernel/Exception.h"
+#include <iostream>
 
 namespace Mantid {
 namespace CurveFitting {
@@ -110,6 +111,7 @@ size_t CrystalFieldMultiSpectrum::getNumberDomains() const {
 
 std::vector<IFunction_sptr>
 CrystalFieldMultiSpectrum::createEquivalentFunctions() const {
+  checkTargetFunction();
   std::vector<IFunction_sptr> funs;
   auto &composite = dynamic_cast<CompositeFunction &>(*m_target);
   for (size_t i = 0; i < composite.nFunctions(); ++i) {
@@ -123,24 +125,24 @@ void CrystalFieldMultiSpectrum::setAttribute(const std::string &name,
                                              const Attribute &attr) {
   if (name == "Temperatures") {
     // Define (declare) the parameters for intensity scaling.
-    auto nSpec = attr.asVector().size();
+    const auto nSpec = attr.asVector().size();
     dynamic_cast<Peaks &>(*m_source).declareIntensityScaling(nSpec);
     m_nOwnParams = m_source->nParams();
     m_fwhmX.resize(nSpec);
     m_fwhmY.resize(nSpec);
     for (size_t iSpec = 0; iSpec < nSpec; ++iSpec) {
-      auto suffix = std::to_string(iSpec);
+      const auto suffix = std::to_string(iSpec);
       declareAttribute("FWHMX" + suffix, Attribute(m_fwhmX[iSpec]));
       declareAttribute("FWHMY" + suffix, Attribute(m_fwhmY[iSpec]));
     }
   }
   if (name == "PhysicalProperties") {
-    auto physpropId = attr.asVector();
-    auto nSpec = physpropId.size();
+    const auto physpropId = attr.asVector();
+    const auto nSpec = physpropId.size();
     auto &source = dynamic_cast<Peaks &>(*m_source);
     for (size_t iSpec = 0; iSpec < nSpec; ++iSpec) {
-      auto suffix = std::to_string(iSpec);
-      auto pptype = static_cast<int>(physpropId[iSpec]);
+      const auto suffix = std::to_string(iSpec);
+      const auto pptype = static_cast<int>(physpropId[iSpec]);
       switch (pptype) {
       case MagneticMoment: // Hmag, Hdir, inverse, Unit, powder
         declareAttribute("Hmag" + suffix, Attribute(1.0));
@@ -182,32 +184,34 @@ void CrystalFieldMultiSpectrum::buildTargetFunction() const {
   ham += hz;
 
   // Get the temperatures from the attribute
-  auto temperatures = getAttribute("Temperatures").asVector();
-  if (temperatures.empty()) {
+  m_temperatures = getAttribute("Temperatures").asVector();
+  if (m_temperatures.empty()) {
     throw std::runtime_error("Vector of temperatures cannot be empty.");
   }
   // Get the FWHMs from the attribute and check for consistency.
-  auto fwhms = getAttribute("FWHMs").asVector();
-  if (fwhms.size() != temperatures.size()) {
-    if (fwhms.empty()) {
+  m_FWHMs = getAttribute("FWHMs").asVector();
+  if (m_FWHMs.size() != m_temperatures.size()) {
+    if (m_FWHMs.empty()) {
       throw std::runtime_error("Vector of FWHMs cannot be empty.");
     }
-    if (fwhms.size() == 1) {
-      auto fwhm = fwhms.front();
-      fwhms.resize(temperatures.size(), fwhm);
+    if (m_FWHMs.size() == 1) {
+      auto fwhm = m_FWHMs.front();
+      m_FWHMs.resize(m_temperatures.size(), fwhm);
     } else {
       throw std::runtime_error("Vector of FWHMs must either have same size as "
-                               "Temperatures or have size 1.");
+                               "Temperatures (" +
+                               std::to_string(m_temperatures.size()) +
+                               ") or have size 1.");
     }
   }
-  auto nSpec = temperatures.size();
+  const auto nSpec = m_temperatures.size();
   // Get a list of "spectra" which corresponds to physical properties
-  auto physprops = getAttribute("PhysicalProperties").asVector();
+  const auto physprops = getAttribute("PhysicalProperties").asVector();
   if (physprops.empty()) {
     m_physprops.resize(nSpec, 0); // Assume no physical properties - just INS
   } else if (physprops.size() != nSpec) {
     if (physprops.size() == 1) {
-      int physprop = (int)physprops.front();
+      int physprop = static_cast<int>(physprops.front());
       m_physprops.resize(nSpec, physprop);
     } else {
       throw std::runtime_error("Vector of PhysicalProperties must have same "
@@ -216,7 +220,7 @@ void CrystalFieldMultiSpectrum::buildTargetFunction() const {
   } else {
     m_physprops.clear();
     for (auto elem : physprops) {
-      m_physprops.push_back((int)elem);
+      m_physprops.push_back(static_cast<int>(elem));
     }
   }
   // Create the single-spectrum functions.
@@ -228,7 +232,7 @@ void CrystalFieldMultiSpectrum::buildTargetFunction() const {
   for (size_t i = 0; i < nSpec; ++i) {
     if (m_physprops[i] > 0) {
       // This "spectrum" is actually a physical properties dataset.
-      fun->addFunction(buildPhysprop(nre, en, wf, ham, temperatures[i], i));
+      fun->addFunction(buildPhysprop(nre, en, wf, ham, m_temperatures[i], i));
     } else {
       if (m_fwhmX[i].empty()) {
         auto suffix = std::to_string(i);
@@ -236,7 +240,7 @@ void CrystalFieldMultiSpectrum::buildTargetFunction() const {
         m_fwhmY[i] = IFunction::getAttribute("FWHMY" + suffix).asVector();
       }
       fun->addFunction(
-          buildSpectrum(nre, en, wf, temperatures[i], fwhms[i], i));
+          buildSpectrum(nre, en, wf, m_temperatures[i], m_FWHMs[i], i));
     }
     fun->setDomainIndex(i, i);
   }
@@ -267,7 +271,7 @@ void CrystalFieldMultiSpectrum::calcExcitations(
   } else {
     intensityScaling = getParameter(source.m_IntensityScalingIdx[iSpec]);
   }
-  auto nPeaks = eExcitations.size();
+  const auto nPeaks = eExcitations.size();
   values.expand(2 * nPeaks);
   for (size_t i = 0; i < nPeaks; ++i) {
     values.setCalculated(i, eExcitations.get(i));
@@ -283,11 +287,11 @@ API::IFunction_sptr CrystalFieldMultiSpectrum::buildSpectrum(
   calcExcitations(nre, en, wf, temperature, values, iSpec);
   m_nPeaks[iSpec] = CrystalFieldUtils::calculateNPeaks(values);
 
-  auto fwhmVariation = getAttribute("FWHMVariation").asDouble();
-  auto peakShape = IFunction::getAttribute("PeakShape").asString();
+  const auto fwhmVariation = getAttribute("FWHMVariation").asDouble();
+  const auto peakShape = IFunction::getAttribute("PeakShape").asString();
   auto bkgdShape = IFunction::getAttribute("Background").asUnquotedString();
-  size_t nRequiredPeaks = IFunction::getAttribute("NPeaks").asInt();
-  bool fixAllPeaks = getAttribute("FixAllPeaks").asBool();
+  const size_t nRequiredPeaks = IFunction::getAttribute("NPeaks").asInt();
+  const bool fixAllPeaks = getAttribute("FixAllPeaks").asBool();
 
   if (!bkgdShape.empty() && bkgdShape.find("name=") != 0 &&
       bkgdShape.front() != '(') {
@@ -322,7 +326,7 @@ API::IFunction_sptr CrystalFieldMultiSpectrum::buildPhysprop(
     IFunction_sptr retval = IFunction_sptr(new CrystalFieldSusceptibility);
     auto &spectrum = dynamic_cast<CrystalFieldSusceptibility &>(*retval);
     spectrum.setEigensystem(en, wf, nre);
-    auto suffix = std::to_string(iSpec);
+    const auto suffix = std::to_string(iSpec);
     spectrum.setAttribute("Hdir", getAttribute("Hdir" + suffix));
     spectrum.setAttribute("inverse", getAttribute("inverse" + suffix));
     spectrum.setAttribute("powder", getAttribute("powder" + suffix));
@@ -335,7 +339,7 @@ API::IFunction_sptr CrystalFieldMultiSpectrum::buildPhysprop(
     auto &spectrum = dynamic_cast<CrystalFieldMagnetisation &>(*retval);
     spectrum.setHamiltonian(ham, nre);
     spectrum.setAttribute("Temperature", Attribute(temperature));
-    auto suffix = std::to_string(iSpec);
+    const auto suffix = std::to_string(iSpec);
     spectrum.setAttribute("Unit", getAttribute("Unit" + suffix));
     spectrum.setAttribute("Hdir", getAttribute("Hdir" + suffix));
     spectrum.setAttribute("powder", getAttribute("powder" + suffix));
@@ -345,7 +349,7 @@ API::IFunction_sptr CrystalFieldMultiSpectrum::buildPhysprop(
     IFunction_sptr retval = IFunction_sptr(new CrystalFieldMoment);
     auto &spectrum = dynamic_cast<CrystalFieldMoment &>(*retval);
     spectrum.setHamiltonian(ham, nre);
-    auto suffix = std::to_string(iSpec);
+    const auto suffix = std::to_string(iSpec);
     spectrum.setAttribute("Unit", getAttribute("Unit" + suffix));
     spectrum.setAttribute("Hdir", getAttribute("Hdir" + suffix));
     spectrum.setAttribute("Hmag", getAttribute("Hmag" + suffix));
@@ -374,11 +378,11 @@ void CrystalFieldMultiSpectrum::updateTargetFunction() const {
   peakCalculator.calculateEigenSystem(en, wf, ham, hz, nre);
   ham += hz;
 
-  auto temperatures = getAttribute("Temperatures").asVector();
   auto &fun = dynamic_cast<MultiDomainFunction &>(*m_target);
   try {
-    for (size_t i = 0; i < temperatures.size(); ++i) {
-      updateSpectrum(*fun.getFunction(i), nre, en, wf, ham, temperatures[i], i);
+    for (size_t i = 0; i < m_temperatures.size(); ++i) {
+      updateSpectrum(*fun.getFunction(i), nre, en, wf, ham, m_temperatures[i],
+                     m_FWHMs[i], i);
     }
   } catch (std::out_of_range &) {
     buildTargetFunction();
@@ -390,7 +394,7 @@ void CrystalFieldMultiSpectrum::updateTargetFunction() const {
 void CrystalFieldMultiSpectrum::updateSpectrum(
     API::IFunction &spectrum, int nre, const DoubleFortranVector &en,
     const ComplexFortranMatrix &wf, const ComplexFortranMatrix &ham,
-    double temperature, size_t iSpec) const {
+    double temperature, double fwhm, size_t iSpec) const {
   switch (m_physprops[iSpec]) {
   case HeatCapacity: {
     auto &heatcap = dynamic_cast<CrystalFieldHeatCapacity &>(spectrum);
@@ -416,13 +420,15 @@ void CrystalFieldMultiSpectrum::updateSpectrum(
     break;
   }
   default:
-    auto fwhmVariation = getAttribute("FWHMVariation").asDouble();
+    const auto fwhmVariation = getAttribute("FWHMVariation").asDouble();
+    const auto peakShape = IFunction::getAttribute("PeakShape").asString();
+    const bool fixAllPeaks = getAttribute("FixAllPeaks").asBool();
     FunctionValues values;
     calcExcitations(nre, en, wf, temperature, values, iSpec);
     auto &composite = dynamic_cast<API::CompositeFunction &>(spectrum);
     m_nPeaks[iSpec] = CrystalFieldUtils::updateSpectrumFunction(
-        composite, values, m_nPeaks[iSpec], 1, m_fwhmX[iSpec], m_fwhmY[iSpec],
-        fwhmVariation);
+        composite, peakShape, values, 1, m_fwhmX[iSpec], m_fwhmY[iSpec],
+        fwhmVariation, fwhm, fixAllPeaks);
   }
 }
 
diff --git a/Framework/CurveFitting/src/Functions/CrystalFieldPeakUtils.cpp b/Framework/CurveFitting/src/Functions/CrystalFieldPeakUtils.cpp
index 0831b47dff7622d6290b92f0477fe80761fae177..55606bca66b3444c41c3e70c5d6bfc5b3f495594 100644
--- a/Framework/CurveFitting/src/Functions/CrystalFieldPeakUtils.cpp
+++ b/Framework/CurveFitting/src/Functions/CrystalFieldPeakUtils.cpp
@@ -8,6 +8,7 @@
 
 #include <algorithm>
 #include <math.h>
+#include <iostream>
 
 namespace Mantid {
 namespace CurveFitting {
@@ -96,10 +97,77 @@ size_t calculateMaxNPeaks(size_t nPeaks) { return nPeaks + nPeaks / 2 + 1; }
 /// @param fwhm :: A width value to pass to the peak.
 inline void ignorePeak(API::IPeakFunction &peak, double fwhm) {
   peak.setHeight(0.0);
-  peak.fixAll();
+  peak.fixAll(true);
   peak.setFwhm(fwhm);
 }
 
+/// Set the width of a peak.
+/// @param peak :: A peak function to set width to.
+/// @param centre :: Peak centre.
+/// @param xVec :: x-values of a tabulated width function.
+/// @param yVec :: y-values of a tabulated width function.
+/// @param fwhmVariation :: A variation in the peak width allowed in a fit.
+/// @param defaultFWHM :: A default value for the FWHM to use if xVec and yVec
+///        are empty.
+/// @param useDefaultFWHM :: If true always use defaultFWHM for the width.
+void setPeakWidth(API::IPeakFunction &peak, double centre,
+                  const std::vector<double> &xVec,
+                  const std::vector<double> &yVec, double fwhmVariation,
+                  double defaultFWHM, bool useDefaultFWHM) {
+  if (useDefaultFWHM) {
+    peak.setFwhm(defaultFWHM);
+  } else {
+    auto fwhm = calculateWidth(centre, xVec, yVec);
+    if (fwhm > 0.0) {
+      peak.setFwhm(fwhm);
+      setWidthConstraint(peak, fwhm, fwhmVariation);
+    } else {
+      ignorePeak(peak, defaultFWHM);
+    }
+  }
+}
+
+/// Create a single peak.
+/// @param peakShape :: A shape of the created peak as a name of an
+/// IPeakFunction.
+/// @param centre :: Peak centre.
+/// @param intensity :: Integrated intensity of the peak.
+/// @param xVec :: x-values of a tabulated width function.
+/// @param yVec :: y-values of a tabulated width function.
+/// @param fwhmVariation :: A variation in the peak width allowed in a fit.
+/// @param defaultFWHM :: A default value for the FWHM to use if xVec and yVec
+///        are empty.
+/// @param isGood :: If the peak is good and may have free fitting parameters.
+/// @param fixAllPeaks :: If true all parameters should be fixed.
+API::IPeakFunction_sptr createPeak(const std::string &peakShape, double centre,
+                                   double intensity,
+                                   const std::vector<double> &xVec,
+                                   const std::vector<double> &yVec,
+                                   double fwhmVariation, double defaultFWHM,
+                                   bool isGood, bool fixAllPeaks) {
+  auto fun = API::FunctionFactory::Instance().createFunction(peakShape);
+  auto peak = boost::dynamic_pointer_cast<API::IPeakFunction>(fun);
+  if (!peak) {
+    throw std::runtime_error("A peak function is expected.");
+  }
+  bool useDefaultFWHM = xVec.empty();
+  const bool fixByDefault = true;
+  if (isGood) {
+    peak->setCentre(centre);
+    peak->setIntensity(intensity);
+    setPeakWidth(*peak, centre, xVec, yVec, fwhmVariation, defaultFWHM,
+                 useDefaultFWHM);
+    peak->fixCentre(fixByDefault);
+    peak->fixIntensity(fixByDefault);
+  } else {
+    ignorePeak(*peak, defaultFWHM);
+  }
+  if (fixAllPeaks) {
+    peak->fixAll(fixByDefault);
+  }
+  return peak;
+}
+
 /// Populates a spectrum with peaks of type given by peakShape argument.
 /// @param spectrum :: A composite function that is a collection of peaks.
 /// @param peakShape :: A shape of each peak as a name of an IPeakFunction.
@@ -125,97 +193,129 @@ size_t buildSpectrumFunction(API::CompositeFunction &spectrum,
     throw std::runtime_error("WidthX and WidthY must have the same size.");
   }
 
-  bool useDefaultFWHM = xVec.empty();
   auto nPeaks = calculateNPeaks(centresAndIntensities);
   auto maxNPeaks = calculateMaxNPeaks(nPeaks);
   if (nRequiredPeaks > maxNPeaks) {
     maxNPeaks = nRequiredPeaks;
   }
   for (size_t i = 0; i < maxNPeaks; ++i) {
-    auto fun = API::FunctionFactory::Instance().createFunction(peakShape);
-    auto peak = boost::dynamic_pointer_cast<API::IPeakFunction>(fun);
-    if (!peak) {
-      throw std::runtime_error("A peak function is expected.");
-    }
-    if (i < nPeaks) {
-      auto centre = centresAndIntensities.getCalculated(i);
-      peak->setCentre(centre);
-      peak->setIntensity(centresAndIntensities.getCalculated(i + nPeaks));
-      if (useDefaultFWHM) {
-        peak->setFwhm(defaultFWHM);
-      } else {
-        auto fwhm = calculateWidth(centre, xVec, yVec);
-        if (fwhm > 0.0) {
-          peak->setFwhm(fwhm);
-          setWidthConstraint(*peak, fwhm, fwhmVariation);
-        } else {
-          ignorePeak(*peak, defaultFWHM);
-        }
-      }
-      peak->fixCentre();
-      peak->fixIntensity();
-    } else {
-      ignorePeak(*peak, defaultFWHM);
+    const bool isGood = i < nPeaks;
+    const auto centre = isGood ? centresAndIntensities.getCalculated(i) : 0.0;
+    const auto intensity =
+        isGood ? centresAndIntensities.getCalculated(i + nPeaks) : 0.0;
+    auto peak = createPeak(peakShape, centre, intensity, xVec, yVec,
+                           fwhmVariation, defaultFWHM, isGood, fixAllPeaks);
+    spectrum.addFunction(peak);
+  }
+  return nPeaks;
+}
+
+/// Update width of a peak.
+/// @param peak :: A peak to update.
+/// @param centre :: Peak centre.
+/// @param xVec :: x-values of a tabulated width function.
+/// @param yVec :: y-values of a tabulated width function.
+/// @param fwhmVariation :: A variation in the peak width allowed in a fit.
+void updatePeakWidth(API::IPeakFunction &peak, double centre,
+                     const std::vector<double> &xVec,
+                     const std::vector<double> &yVec, double fwhmVariation) {
+  bool mustUpdateWidth = !xVec.empty();
+  if (mustUpdateWidth) {
+    auto fwhm = peak.fwhm();
+    auto expectedFwhm = calculateWidth(centre, xVec, yVec);
+    if (expectedFwhm <= 0.0) {
+      ignorePeak(peak, fwhm);
+    } else if (fabs(fwhm - expectedFwhm) > fwhmVariation) {
+      peak.setFwhm(expectedFwhm);
+      setWidthConstraint(peak, expectedFwhm, fwhmVariation);
     }
+  }
+}
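+
+// Behaviour sketch (illustrative numbers): with fwhmVariation = 0.1, a peak
+// whose current FWHM is 1.0 is left alone if the tabulated width at its
+// centre is, say, 1.05, reset and re-constrained if it is 1.3, and switched
+// off via ignorePeak if the tabulated width comes out non-positive.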
+
+/// Update a single peak.
+/// @param peak :: A peak to update.
+/// @param centre :: New peak centre.
+/// @param intensity :: New integrated intensity.
+/// @param xVec :: x-values of a tabulated width function.
+/// @param yVec :: y-values of a tabulated width function.
+/// @param fwhmVariation :: A variation in the peak width allowed in a fit.
+/// @param isGood :: Whether the peak is good and may have free fitting
+///        parameters.
+/// @param fixAllPeaks :: If true, all of the peak's parameters should be fixed.
+void updatePeak(API::IPeakFunction &peak, double centre, double intensity,
+                const std::vector<double> &xVec,
+                const std::vector<double> &yVec, double fwhmVariation,
+                bool isGood, bool fixAllPeaks) {
+  const bool fixByDefault = true;
+  if (isGood) {
+    peak.unfixAllDefault();
+    peak.setCentre(centre);
+    peak.setIntensity(intensity);
+    updatePeakWidth(peak, centre, xVec, yVec, fwhmVariation);
+    peak.unfixIntensity();
+    peak.fixIntensity(fixByDefault);
     if (fixAllPeaks) {
-      peak->fixAll();
+      peak.fixAll(fixByDefault);
     }
-    spectrum.addFunction(peak);
+  } else {
+    peak.setHeight(0.0);
+    peak.fixAllActive(fixByDefault);
   }
-  return nPeaks;
 }
 
 /// Update the peak parameters after recalculation of the crystal field.
 /// @param spectrum :: A composite function containing the peaks to update.
 ///                    May contain other functions (e.g. a background) at
 ///                    indices < iFirst.
+/// @param peakShape :: A shape of each peak as a name of an IPeakFunction.
 /// @param centresAndIntensities :: A FunctionValues object containing centres
 ///        and intensities for the peaks. First nPeaks calculated values are the
 ///        centres and the following nPeaks values are the intensities.
-/// @param nOriginalPeaks :: Number of actual peaks the spectrum had before the
-///        update.This update can change the number of actual peaks.
 /// @param iFirst :: The first index in the composite function (spectrum) at
 ///        which the peaks begin.
 /// @param xVec :: x-values of a tabulated width function.
 /// @param yVec :: y-values of a tabulated width function.
 /// @param fwhmVariation :: A variation in the peak width allowed in a fit.
+/// @param defaultFWHM :: A default value for the FWHM to use if xVec and yVec
+///        are empty.
+/// @param fixAllPeaks :: If true, fix all peak parameters.
 /// @return :: The new number of fitted peaks.
 size_t updateSpectrumFunction(API::CompositeFunction &spectrum,
+                              const std::string &peakShape,
                               const FunctionValues &centresAndIntensities,
-                              size_t nOriginalPeaks, size_t iFirst,
-                              const std::vector<double> &xVec,
+                              size_t iFirst, const std::vector<double> &xVec,
                               const std::vector<double> &yVec,
-                              double fwhmVariation) {
+                              double fwhmVariation, double defaultFWHM,
+                              bool fixAllPeaks) {
   size_t nGoodPeaks = calculateNPeaks(centresAndIntensities);
   size_t maxNPeaks = calculateMaxNPeaks(nGoodPeaks);
-  bool mustUpdateWidth = !xVec.empty();
+  size_t nFunctions = spectrum.nFunctions();
 
   for (size_t i = 0; i < maxNPeaks; ++i) {
-    auto fun = spectrum.getFunction(i + iFirst);
-    auto &peak = dynamic_cast<API::IPeakFunction &>(*fun);
-    if (i < nGoodPeaks) {
-      auto centre = centresAndIntensities.getCalculated(i);
-      peak.setCentre(centre);
-      peak.setIntensity(centresAndIntensities.getCalculated(i + nGoodPeaks));
-      if (mustUpdateWidth) {
-        auto fwhm = peak.fwhm();
-        auto expectedFwhm = calculateWidth(centre, xVec, yVec);
-        if (expectedFwhm <= 0.0) {
-          ignorePeak(peak, fwhm);
-        } else if (fabs(fwhm - expectedFwhm) > fwhmVariation) {
-          peak.setFwhm(expectedFwhm);
-          setWidthConstraint(peak, expectedFwhm, fwhmVariation);
-        }
-      }
-      peak.unfixIntensity();
-      peak.fixIntensity();
+    const bool isGood = i < nGoodPeaks;
+    auto centre = isGood ? centresAndIntensities.getCalculated(i) : 0.0;
+    auto intensity =
+        isGood ? centresAndIntensities.getCalculated(i + nGoodPeaks) : 0.0;
+
+    if (i < nFunctions) {
+      auto fun = spectrum.getFunction(i + iFirst);
+      auto &peak = dynamic_cast<API::IPeakFunction &>(*fun);
+      updatePeak(peak, centre, intensity, xVec, yVec, fwhmVariation, isGood,
+                 fixAllPeaks);
     } else {
-      peak.setHeight(0.0);
-      if (i > nOriginalPeaks) {
-        peak.fixAll();
-      }
+      auto peakPtr =
+          createPeak(peakShape, centre, intensity, xVec, yVec, fwhmVariation,
+                     defaultFWHM, isGood, fixAllPeaks);
+      spectrum.addFunction(peakPtr);
     }
   }
+  // Any peaks beyond maxNPeaks are ignored but not removed.
+  for (size_t i = maxNPeaks; i < nFunctions - iFirst; ++i) {
+    auto fun = spectrum.getFunction(i + iFirst);
+    auto &peak = dynamic_cast<API::IPeakFunction &>(*fun);
+    const auto fwhm = peak.fwhm();
+    ignorePeak(peak, fwhm);
+  }
   return nGoodPeaks;
 }
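+
+// Illustrative call (mirroring the use in CrystalFieldSpectrum::
+// updateTargetFunction further down in this patch); the "Lorentzian" shape
+// name is only an example:
+//
+//   CrystalFieldUtils::updateSpectrumFunction(spectrum, "Lorentzian", values,
+//                                             /*iFirst=*/0, xVec, yVec,
+//                                             fwhmVariation, defaultFWHM,
+//                                             fixAllPeaks);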
 
diff --git a/Framework/CurveFitting/src/Functions/CrystalFieldSpectrum.cpp b/Framework/CurveFitting/src/Functions/CrystalFieldSpectrum.cpp
index 8c60bd38a19644861b01f5723b28eda4576461d4..5776320e823c0791e23d64d999a8b717ddec87d3 100644
--- a/Framework/CurveFitting/src/Functions/CrystalFieldSpectrum.cpp
+++ b/Framework/CurveFitting/src/Functions/CrystalFieldSpectrum.cpp
@@ -78,22 +78,21 @@ void CrystalFieldSpectrum::updateTargetFunction() const {
     return;
   }
   m_dirty = false;
+  auto peakShape = getAttribute("PeakShape").asString();
   auto xVec = getAttribute("FWHMX").asVector();
   auto yVec = getAttribute("FWHMY").asVector();
   auto fwhmVariation = getAttribute("FWHMVariation").asDouble();
+  auto defaultFWHM = getAttribute("FWHM").asDouble();
+  bool fixAllPeaks = getAttribute("FixAllPeaks").asBool();
   FunctionDomainGeneral domain;
   FunctionValues values;
   m_source->function(domain, values);
   m_target->setAttribute("NumDeriv", this->getAttribute("NumDeriv"));
   auto &spectrum = dynamic_cast<CompositeFunction &>(*m_target);
   m_nPeaks = CrystalFieldUtils::calculateNPeaks(values);
-  auto maxNPeaks = CrystalFieldUtils::calculateMaxNPeaks(m_nPeaks);
-  if (maxNPeaks > spectrum.nFunctions()) {
-    buildTargetFunction();
-  } else {
-    CrystalFieldUtils::updateSpectrumFunction(spectrum, values, m_nPeaks, 0,
-                                              xVec, yVec, fwhmVariation);
-  }
+  CrystalFieldUtils::updateSpectrumFunction(spectrum, peakShape, values, 0,
+                                            xVec, yVec, fwhmVariation,
+                                            defaultFWHM, fixAllPeaks);
   storeReadOnlyAttribute("NPeaks", Attribute(static_cast<int>(m_nPeaks)));
 }
 
@@ -109,29 +108,15 @@ std::string CrystalFieldSpectrum::asString() const {
       ostr << ',' << attName << '=' << attValue;
     }
   }
-  // Print own parameters
-  for (size_t i = 0; i < m_nOwnParams; i++) {
-    const ParameterTie *tie = getTie(i);
-    if (!tie || !tie->isDefault()) {
-      ostr << ',' << parameterName(i) << '=' << getParameter(i);
-    }
-  }
-
-  // collect non-default constraints
-  std::vector<std::string> constraints;
-  for (size_t i = 0; i < m_nOwnParams; i++) {
-    auto constraint = writeConstraint(i);
-    if (!constraint.empty()) {
-      constraints.push_back(constraint);
-    }
-  }
-
-  // collect the non-default ties
   std::vector<std::string> ties;
+  // Print own parameters
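+  // Active parameters are printed inline as plain values; fixed parameters
+  // are collected and written out in the ties=(...) section so they come
+  // back fixed when the string is re-parsed.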
   for (size_t i = 0; i < m_nOwnParams; i++) {
-    auto tie = writeTie(i);
-    if (!tie.empty()) {
-      ties.push_back(tie);
+    std::ostringstream paramOut;
+    paramOut << parameterName(i) << '=' << getParameter(i);
+    if (isActive(i)) {
+      ostr << ',' << paramOut.str();
+    } else if (isFixed(i)) {
+      ties.push_back(paramOut.str());
     }
   }
 
@@ -155,24 +140,21 @@ std::string CrystalFieldSpectrum::asString() const {
         ostr << ",f" << ip << "." << peak.parameterName(i) << '='
              << peak.getParameter(i);
       }
-      auto constraint = writeConstraint(i);
-      if (!constraint.empty()) {
-        constraints.push_back(constraint);
-      }
-      auto tieStr = writeTie(i);
-      if (!tieStr.empty()) {
-        ties.push_back(tieStr);
-      }
     }
   } // for peaks
 
+  // collect non-default constraints
+  std::string constraints = writeConstraints();
   // print constraints
   if (!constraints.empty()) {
-    ostr << ",constraints=("
-         << Kernel::Strings::join(constraints.begin(), constraints.end(), ",")
-         << ")";
+    ostr << ",constraints=(" << constraints << ")";
   }
 
+  // collect the non-default ties
+  auto tiesString = writeTies();
+  if (!tiesString.empty()) {
+    ties.push_back(tiesString);
+  }
   // print the ties
   if (!ties.empty()) {
     ostr << ",ties=(" << Kernel::Strings::join(ties.begin(), ties.end(), ",")
diff --git a/Framework/CurveFitting/src/Functions/Gaussian.cpp b/Framework/CurveFitting/src/Functions/Gaussian.cpp
index c3308e094e617165874b59e961e65550551b5e0f..48e567132d9e85f7f5e19cdf4a67b9030c3b13c0 100644
--- a/Framework/CurveFitting/src/Functions/Gaussian.cpp
+++ b/Framework/CurveFitting/src/Functions/Gaussian.cpp
@@ -107,14 +107,16 @@ void Gaussian::setIntensity(const double i) {
   }
 }
 
-void Gaussian::fixCentre() { fixParameter("PeakCentre"); }
+void Gaussian::fixCentre(bool isDefault) {
+  fixParameter("PeakCentre", isDefault);
+}
 
 void Gaussian::unfixCentre() { unfixParameter("PeakCentre"); }
 
-void Gaussian::fixIntensity() {
+void Gaussian::fixIntensity(bool isDefault) {
   std::string formula =
       std::to_string(intensity() / sqrt(2.0 * M_PI)) + "/Sigma";
-  tie("Height", formula, true);
+  tie("Height", formula, isDefault);
 }
 
 void Gaussian::unfixIntensity() { removeTie("Height"); }
diff --git a/Framework/CurveFitting/src/Functions/GramCharlierComptonProfile.cpp b/Framework/CurveFitting/src/Functions/GramCharlierComptonProfile.cpp
index 8054cfe6b052041f1d57b1897c349476638ea65b..deca9e25ada136615b2f854f0b6f926b2dcd29f5 100644
--- a/Framework/CurveFitting/src/Functions/GramCharlierComptonProfile.cpp
+++ b/Framework/CurveFitting/src/Functions/GramCharlierComptonProfile.cpp
@@ -176,7 +176,7 @@ GramCharlierComptonProfile::intensityParameterIndices() const {
   }
   // Include Kfse if it is not fixed
   const size_t kIndex = this->parameterIndex(KFSE_NAME);
-  if (!isFixed(kIndex)) {
+  if (isActive(kIndex)) {
     indices.push_back(kIndex);
   }
 
@@ -375,7 +375,7 @@ void GramCharlierComptonProfile::cacheYSpaceValues(
   // Is FSE fixed at the moment?
   // The ComptonScatteringCountRate fixes it but we still need to know if the
   // user wanted it fixed
-  m_userFixedFSE = this->isFixed(this->parameterIndex(KFSE_NAME));
+  m_userFixedFSE = !this->isActive(this->parameterIndex(KFSE_NAME));
 
   const auto &yspace = ySpace();
   const auto &modq = modQ();
diff --git a/Framework/CurveFitting/src/Functions/Lorentzian.cpp b/Framework/CurveFitting/src/Functions/Lorentzian.cpp
index 321c971ae6f401ca9defe4a3a768ec03a5f2eedc..4b949cadbc1774ec370e4b3935419d66856341f0 100644
--- a/Framework/CurveFitting/src/Functions/Lorentzian.cpp
+++ b/Framework/CurveFitting/src/Functions/Lorentzian.cpp
@@ -56,11 +56,15 @@ void Lorentzian::setFwhm(const double w) {
   setParameter("FWHM", w);
 }
 
-void Lorentzian::fixCentre() { fixParameter("PeakCentre"); }
+void Lorentzian::fixCentre(bool isDefault) {
+  fixParameter("PeakCentre", isDefault);
+}
 
 void Lorentzian::unfixCentre() { unfixParameter("PeakCentre"); }
 
-void Lorentzian::fixIntensity() { fixParameter("Amplitude"); }
+void Lorentzian::fixIntensity(bool isDefault) {
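+  // For the Lorentzian the "Amplitude" parameter is its integrated intensity,
+  // so fixing the intensity reduces to fixing that single parameter.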
+  fixParameter("Amplitude", isDefault);
+}
 
 void Lorentzian::unfixIntensity() { unfixParameter("Amplitude"); }
 
diff --git a/Framework/CurveFitting/src/GSLFunctions.cpp b/Framework/CurveFitting/src/GSLFunctions.cpp
index f34899b7133b45615d3fc4e5328680d631aacb6e..bb3276dc3d55b3283c0d1af16841775cde555c86 100644
--- a/Framework/CurveFitting/src/GSLFunctions.cpp
+++ b/Framework/CurveFitting/src/GSLFunctions.cpp
@@ -135,8 +135,6 @@ int gsl_df(const gsl_vector *x, void *params, gsl_matrix *J) {
   for (size_t iY = 0; iY < p->n; iY++)
     for (size_t iP = 0; iP < p->p; iP++) {
       J->data[iY * p->p + iP] *= values->getFitWeight(iY);
-      // std::cerr << iY << ' ' << iP << ' ' << J->data[iY*p->p + iP] <<
-      // '\n';
     }
 
   return GSL_SUCCESS;
diff --git a/Framework/CurveFitting/test/Algorithms/FitTest.h b/Framework/CurveFitting/test/Algorithms/FitTest.h
index 3ff7813dd922d863295ea6a5b31e19d371ed22ae..fe65df76444257751752a0d5e07af46c296d3a7e 100644
--- a/Framework/CurveFitting/test/Algorithms/FitTest.h
+++ b/Framework/CurveFitting/test/Algorithms/FitTest.h
@@ -17,6 +17,7 @@
 #include "MantidDataObjects/TableWorkspace.h"
 #include "MantidDataObjects/Workspace2D.h"
 
+#include "MantidTestHelpers/FunctionCreationHelper.h"
 #include "MantidTestHelpers/MultiDomainFunctionHelper.h"
 #include "MantidTestHelpers/WorkspaceCreationHelper.h"
 
@@ -2035,6 +2036,101 @@ public:
     AnalysisDataService::Instance().clear();
   }
 
+  void test_fit_size_change() {
+    auto ws = WorkspaceCreationHelper::create2DWorkspaceFromFunction(
+        [](double x, int) { return 2 * exp(-(5 * x + x * x - 3 * x * x * x)); },
+        1, 0, 1, 0.1);
+    {
+      API::IFunction_sptr fun =
+          boost::make_shared<TestHelpers::FunctionChangesNParams>();
+      TS_ASSERT_EQUALS(fun->nParams(), 1);
+
+      Fit fit;
+      fit.initialize();
+      fit.setRethrows(true);
+      fit.setProperty("Function", fun);
+      fit.setProperty("InputWorkspace", ws);
+      TS_ASSERT_THROWS_NOTHING(fit.execute());
+      TS_ASSERT_EQUALS(fun->nParams(), 5);
+      TS_ASSERT_DELTA(fun->getParameter(0), 1.9936, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(1), -9.4991, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(2), 19.1074, 0.1);
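+  // getAllEquivalents can return the same hkl more than once for reflections
+  // lying on symmetry elements of the group, so sort (descending) and strip
+  // the duplicates before returning.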
+      TS_ASSERT_DELTA(fun->getParameter(3), -17.8434, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(4), 6.3465, 0.1);
+    }
+    {
+      API::IFunction_sptr fun =
+          boost::make_shared<TestHelpers::FunctionChangesNParams>();
+      TS_ASSERT_EQUALS(fun->nParams(), 1);
+
+      Fit fit;
+      fit.initialize();
+      fit.setRethrows(true);
+      fit.setProperty("Function", fun);
+      fit.setProperty("InputWorkspace", ws);
+      fit.setProperty("Minimizer", "Levenberg-MarquardtMD");
+      TS_ASSERT_THROWS_NOTHING(fit.execute());
+      TS_ASSERT_EQUALS(fun->nParams(), 5);
+      TS_ASSERT_DELTA(fun->getParameter(0), 1.9936, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(1), -9.4991, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(2), 19.1074, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(3), -17.8434, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(4), 6.3465, 0.1);
+      std::string status = fit.getProperty("OutputStatus");
+      TS_ASSERT_EQUALS(status, "success");
+    }
+
+    AnalysisDataService::Instance().clear();
+  }
+
+  void test_fit_size_change_1() {
+    auto ws = WorkspaceCreationHelper::create2DWorkspaceFromFunction(
+        [](double x, int) { return 2 + x - 0.1 * x * x; }, 1, 0, 1, 0.1);
+    {
+      API::IFunction_sptr fun =
+          boost::make_shared<TestHelpers::FunctionChangesNParams>();
+      TS_ASSERT_EQUALS(fun->nParams(), 1);
+
+      Fit fit;
+      fit.initialize();
+      fit.setRethrows(true);
+      fit.setProperty("Function", fun);
+      fit.setProperty("InputWorkspace", ws);
+      TS_ASSERT_THROWS_NOTHING(fit.execute());
+      TS_ASSERT_EQUALS(fun->nParams(), 5);
+      TS_ASSERT_DELTA(fun->getParameter(0), 2.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(1), 1.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(2), -0.1, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(3), 0.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(4), 0.0, 0.0001);
+      std::string status = fit.getProperty("OutputStatus");
+      TS_ASSERT_EQUALS(status, "success");
+    }
+    {
+      API::IFunction_sptr fun =
+          boost::make_shared<TestHelpers::FunctionChangesNParams>();
+      TS_ASSERT_EQUALS(fun->nParams(), 1);
+
+      Fit fit;
+      fit.initialize();
+      fit.setRethrows(true);
+      fit.setProperty("Function", fun);
+      fit.setProperty("InputWorkspace", ws);
+      fit.setProperty("Minimizer", "Levenberg-MarquardtMD");
+      TS_ASSERT_THROWS_NOTHING(fit.execute());
+      TS_ASSERT_EQUALS(fun->nParams(), 5);
+      TS_ASSERT_DELTA(fun->getParameter(0), 2.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(1), 1.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(2), -0.1, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(3), 0.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(4), 0.0, 0.0001);
+      std::string status = fit.getProperty("OutputStatus");
+      TS_ASSERT_EQUALS(status, "success");
+    }
+
+    AnalysisDataService::Instance().clear();
+  }
+
 private:
   /// build test input workspaces for the Pawley function Fit tests
   MatrixWorkspace_sptr getWorkspacePawley(const std::string &functionString,
diff --git a/Framework/CurveFitting/test/CMakeLists.txt b/Framework/CurveFitting/test/CMakeLists.txt
index 077bb0bbeda4ebacaf35ad1479cb26092b31d472..59c9204470fd1f3ad3f5d6d19185d9e1e3ca84a8 100644
--- a/Framework/CurveFitting/test/CMakeLists.txt
+++ b/Framework/CurveFitting/test/CMakeLists.txt
@@ -6,6 +6,7 @@ if ( CXXTEST_FOUND )
   # It will go out of scope at the end of this file so doesn't need un-setting
   set ( TESTHELPER_SRCS ../../TestHelpers/src/ComponentCreationHelper.cpp
                         ../../TestHelpers/src/InstrumentCreationHelper.cpp
+                        ../../TestHelpers/src/FunctionCreationHelper.cpp
                         ../../TestHelpers/src/MultiDomainFunctionHelper.cpp
                         ../../TestHelpers/src/StartFrameworkManager.cpp
                         ../../TestHelpers/src/TearDownWorld.cpp
diff --git a/Framework/CurveFitting/test/CompositeFunctionTest.h b/Framework/CurveFitting/test/CompositeFunctionTest.h
index 15433437e8e287e06da2459340885d0293c70ff7..f839e29a6edfc6de2104fbb24be80794607e3595 100644
--- a/Framework/CurveFitting/test/CompositeFunctionTest.h
+++ b/Framework/CurveFitting/test/CompositeFunctionTest.h
@@ -404,6 +404,87 @@ public:
     TS_ASSERT_DELTA(mfun->getParameter("f1.b"), 4.4, 0.01);
     TS_ASSERT_EQUALS(s.getError(), "success");
   }
+
+  void test_constraints_str() {
+    auto fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,constraints=(Height>0)");
+    TS_ASSERT_EQUALS(
+        fun->asString(),
+        "name=Gaussian,Height=0,PeakCentre=0,Sigma=0,constraints=(0<Height)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,constraints=(Height>0);name=LinearBackground,"
+        "constraints=(A0<0)");
+    TS_ASSERT_EQUALS(fun->asString(), "name=Gaussian,Height=0,PeakCentre=0,"
+                                      "Sigma=0,constraints=(0<Height);name="
+                                      "LinearBackground,A0=0,A1=0,constraints=("
+                                      "A0<0)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian;name=LinearBackground;"
+        "constraints=(f0.Height>0, f1.A0<0)");
+    TS_ASSERT_EQUALS(fun->asString(), "name=Gaussian,Height=0,PeakCentre=0,"
+                                      "Sigma=0;name=LinearBackground,A0=0,A1=0;"
+                                      "constraints=(0<f0.Height,f1.A0<0)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,constraints=(Height>0);name=LinearBackground,"
+        "constraints=(A0<0);constraints=(f0.Sigma<0, f1.A1>10)");
+    TS_ASSERT_EQUALS(fun->asString(),
+                     "name=Gaussian,Height=0,PeakCentre=0,Sigma=0,constraints=("
+                     "0<Height);name=LinearBackground,A0=0,A1=0,constraints=("
+                     "A0<0);constraints=(f0.Sigma<0,10<f1.A1)");
+  }
+
+  void test_ties_str() {
+    auto fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,ties=(Height=10)");
+    TS_ASSERT_EQUALS(
+        fun->asString(),
+        "name=Gaussian,Height=10,PeakCentre=0,Sigma=0,ties=(Height=10)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,ties=(Height=10*Sigma)");
+    TS_ASSERT_EQUALS(
+        fun->asString(),
+        "name=Gaussian,Height=0,PeakCentre=0,Sigma=0,ties=(Height=10*Sigma)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,ties=(Height=10);name=LinearBackground,"
+        "ties=(A0=0)");
+    TS_ASSERT_EQUALS(fun->asString(), "name=Gaussian,Height=10,PeakCentre=0,"
+                                      "Sigma=0,ties=(Height=10);name="
+                                      "LinearBackground,A0=0,A1=0,ties=(A0=0)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,ties=(Height=10*Sigma);name=LinearBackground,"
+        "ties=(A0=A1)");
+    TS_ASSERT_EQUALS(fun->asString(),
+                     "name=Gaussian,Height=0,PeakCentre=0,Sigma=0,ties=(Height="
+                     "10*Sigma);name=LinearBackground,A0=0,A1=0,ties=(A0=A1)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian;name=LinearBackground;"
+        "ties=(f0.Height=2, f1.A0=f1.A1)");
+    TS_ASSERT_EQUALS(fun->asString(),
+                     "name=Gaussian,Height=2,PeakCentre=0,Sigma=0,ties=(Height="
+                     "2);name=LinearBackground,A0=0,A1=0;ties=(f1.A0=f1.A1)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian;name=LinearBackground;"
+        "ties=(f0.Height=f1.A0=f1.A1)");
+    TS_ASSERT_EQUALS(fun->asString(), "name=Gaussian,Height=0,PeakCentre=0,"
+                                      "Sigma=0;name=LinearBackground,A0=0,A1=0;"
+                                      "ties=(f1.A0=f1.A1,f0.Height=f1.A1)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,ties=(Height=0);name=LinearBackground,"
+        "ties=(A0=A1);ties=(f0.Sigma=f1.A1)");
+    TS_ASSERT_EQUALS(fun->asString(), "name=Gaussian,Height=0,PeakCentre=0,"
+                                      "Sigma=0,ties=(Height=0);name="
+                                      "LinearBackground,A0=0,A1=0,ties=(A0=A1);"
+                                      "ties=(f0.Sigma=f1.A1)");
+  }
 };
 
 #endif /*CURVEFITTING_COMPOSITEFUNCTIONTEST_H_*/
diff --git a/Framework/CurveFitting/test/Constraints/BoundaryConstraintTest.h b/Framework/CurveFitting/test/Constraints/BoundaryConstraintTest.h
index fb4e27623588072a569982443cc31493c2f98ba3..9d716aa452d709d3dbc16d046abc6cd0ce2e48b4 100644
--- a/Framework/CurveFitting/test/Constraints/BoundaryConstraintTest.h
+++ b/Framework/CurveFitting/test/Constraints/BoundaryConstraintTest.h
@@ -22,40 +22,6 @@ using namespace Mantid::CurveFitting::Constraints;
 
 class BoundaryConstraintTest : public CxxTest::TestSuite {
 public:
-  void test1() {
-    // set up fitting function
-    Gaussian gaus;
-    gaus.initialize();
-    gaus.setCentre(11.2);
-    gaus.setHeight(100.7);
-    gaus.setParameter("Sigma", 1.1);
-
-    BoundaryConstraint bc;
-    bc.reset(&gaus, 2);
-
-    TS_ASSERT(!bc.hasLower());
-    TS_ASSERT(!bc.hasUpper());
-
-    bc.setLower(1.0);
-    bc.setUpper(2.0);
-
-    TS_ASSERT(bc.hasLower());
-    TS_ASSERT(bc.hasUpper());
-
-    BoundaryConstraint bc2;
-
-    bc2.reset(&gaus, 2);
-    bc2.setBounds(10, 20);
-
-    TS_ASSERT_DELTA(bc2.lower(), 10, 0.0001);
-    TS_ASSERT_DELTA(bc2.upper(), 20, 0.0001);
-
-    TS_ASSERT_DELTA(gaus.getParameter("Sigma"), 1.1, 0.0001);
-
-    bc2.setParamToSatisfyConstraint();
-    TS_ASSERT_DELTA(gaus.getParameter("Sigma"), 10.0, 0.0001);
-  }
-
   void testInitialize1() {
     Gaussian gaus;
     gaus.initialize();
@@ -64,7 +30,7 @@ public:
     expr.parse("10<Sigma<20");
     bc.initialize(&gaus, expr, false);
 
-    TS_ASSERT_EQUALS(bc.getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc.parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc.lower(), 10, 0.0001);
     TS_ASSERT_DELTA(bc.upper(), 20, 0.0001);
   }
@@ -77,7 +43,7 @@ public:
     expr.parse("20>Sigma>10");
     bc.initialize(&gaus, expr, false);
 
-    TS_ASSERT_EQUALS(bc.getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc.parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc.lower(), 10, 0.0001);
     TS_ASSERT_DELTA(bc.upper(), 20, 0.0001);
   }
@@ -90,7 +56,7 @@ public:
     expr.parse("10<Sigma");
     bc.initialize(&gaus, expr, false);
 
-    TS_ASSERT_EQUALS(bc.getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc.parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc.lower(), 10, 0.0001);
     TS_ASSERT(!bc.hasUpper());
   }
@@ -103,7 +69,7 @@ public:
     expr.parse("Sigma<20");
     bc.initialize(&gaus, expr, false);
 
-    TS_ASSERT_EQUALS(bc.getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc.parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc.upper(), 20, 0.0001);
     TS_ASSERT(!bc.hasLower());
   }
@@ -134,8 +100,8 @@ public:
     TS_ASSERT(bc.hasLower());
     TS_ASSERT(!bc.hasUpper());
     TS_ASSERT_EQUALS(bc.lower(), 0.0);
-    TS_ASSERT_EQUALS(bc.getParameterName(), "Sigma");
-    TS_ASSERT_EQUALS(bc.getFunction(), &gaus);
+    TS_ASSERT_EQUALS(bc.parameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc.getLocalFunction(), &gaus);
   }
 
   void testAsString() {
@@ -147,7 +113,7 @@ public:
       expr.parse("Sigma<20");
       bc->initialize(&gaus, expr, false);
 
-      TS_ASSERT_EQUALS(bc->getParameterName(), "Sigma");
+      TS_ASSERT_EQUALS(bc->parameterName(), "Sigma");
       TS_ASSERT_DELTA(bc->upper(), 20, 0.0001);
       TS_ASSERT(!bc->hasLower());
       gaus.addConstraint(std::move(bc));
@@ -162,7 +128,7 @@ public:
     auto bc = dynamic_cast<BoundaryConstraint *>(c);
     TS_ASSERT(bc);
 
-    TS_ASSERT_EQUALS(bc->getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc->parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc->upper(), 20, 0.0001);
     TS_ASSERT(!bc->hasLower());
   }
@@ -192,7 +158,7 @@ public:
     BoundaryConstraint *bc = dynamic_cast<BoundaryConstraint *>(c);
     TS_ASSERT(bc);
 
-    TS_ASSERT_EQUALS(bc->getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc->parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc->upper(), 20, 0.0001);
     TS_ASSERT(!bc->hasLower());
 
@@ -201,7 +167,7 @@ public:
     bc = dynamic_cast<BoundaryConstraint *>(c);
     TS_ASSERT(bc);
 
-    TS_ASSERT_EQUALS(bc->getParameterName(), "Height");
+    TS_ASSERT_EQUALS(bc->parameterName(), "Height");
     TS_ASSERT_DELTA(bc->lower(), 1.3, 0.0001);
     TS_ASSERT_DELTA(bc->upper(), 3.4, 0.0001);
   }
diff --git a/Framework/CurveFitting/test/FunctionFactoryConstraintTest.h b/Framework/CurveFitting/test/FunctionFactoryConstraintTest.h
index 1a2aba36b775eb8a2fff21c66474ec01cc68aab8..b4083d4e9b7d694db1cec6fc0dc7a9ad15836d0c 100644
--- a/Framework/CurveFitting/test/FunctionFactoryConstraintTest.h
+++ b/Framework/CurveFitting/test/FunctionFactoryConstraintTest.h
@@ -349,8 +349,8 @@ public:
 
     TS_ASSERT(fun1->isFixed(0));
     TS_ASSERT(fun1->isFixed(1));
-    TS_ASSERT(fun1->isFixed(2));
-    TS_ASSERT(!fun1->isFixed(3));
+    TS_ASSERT(!fun1->isActive(2));
+    TS_ASSERT(fun1->isActive(3));
   }
 };
 
diff --git a/Framework/CurveFitting/test/Functions/CrystalFieldMultiSpectrumTest.h b/Framework/CurveFitting/test/Functions/CrystalFieldMultiSpectrumTest.h
index 5a8fff0339917e708fdec74900e29803172e996d..4656d63c7a60cc0013af393e219cae68645a27ad 100644
--- a/Framework/CurveFitting/test/Functions/CrystalFieldMultiSpectrumTest.h
+++ b/Framework/CurveFitting/test/Functions/CrystalFieldMultiSpectrumTest.h
@@ -94,6 +94,29 @@ public:
     TS_ASSERT_DELTA(fun.getParameter("f0.f3.FWHM"), 1.5, 1e-3);
   }
 
+  void test_evaluate_1() {
+    auto funStr = "name=CrystalFieldSpectrum,Ion=Ce,Temperature=44,"
+                  "ToleranceIntensity=0.001,B20=0.37737,B22=3.9770,"
+                  "B40=-0.031787,B42=-0.11611,B44=-0.12544,"
+                  "f0.FWHM=1.6,f1.FWHM=2.0,f2.FWHM=2.3";
+    auto ws = createWorkspace();
+    auto alg = AlgorithmFactory::Instance().create("EvaluateFunction", -1);
+    alg->initialize();
+    alg->setPropertyValue("Function", funStr);
+    alg->setProperty("InputWorkspace", ws);
+    alg->setProperty("OutputWorkspace", "out");
+    alg->execute();
+
+    auto out =
+        AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>("out");
+    TS_ASSERT(out);
+    TS_ASSERT_EQUALS(out->getNumberHistograms(), 3);
+    TS_ASSERT_DELTA(out->readY(1)[0], 1.094 * c_mbsr, 0.001 * c_mbsr);
+    TS_ASSERT_DELTA(out->readY(1)[1], 0.738 * c_mbsr, 0.001 * c_mbsr);
+    TS_ASSERT_DELTA(out->readY(1)[2], 0.373 * c_mbsr, 0.001 * c_mbsr);
+    AnalysisDataService::Instance().clear();
+  }
+
   void test_evaluate() {
     auto funStr = "name=CrystalFieldMultiSpectrum,Ion=Ce,Temperatures=(44, "
                   "50),ToleranceIntensity=0.001,B20=0.37737,B22=3.9770,"
diff --git a/Framework/CurveFitting/test/Functions/CrystalFieldPeaksTest.h b/Framework/CurveFitting/test/Functions/CrystalFieldPeaksTest.h
index a7473a8b8ab0f38cabb4d65ab170e1bea00720ea..bf887a888ca94658b6f14e13e9e2eb4e24971313 100644
--- a/Framework/CurveFitting/test/Functions/CrystalFieldPeaksTest.h
+++ b/Framework/CurveFitting/test/Functions/CrystalFieldPeaksTest.h
@@ -492,7 +492,7 @@ public:
     TS_ASSERT(isFixed(fun, "B43"));
     TS_ASSERT(isFixed(fun, "IB43"));
     auto i = fun.parameterIndex("B44");
-    TS_ASSERT(fun.isFixed(i));
+    TS_ASSERT(!fun.isActive(i));
     TS_ASSERT(isFixed(fun, "IB44"));
 
     TS_ASSERT(!isFixed(fun, "B60"));
@@ -503,7 +503,7 @@ public:
     TS_ASSERT(isFixed(fun, "B63"));
     TS_ASSERT(isFixed(fun, "IB63"));
     i = fun.parameterIndex("B64");
-    TS_ASSERT(fun.isFixed(i));
+    TS_ASSERT(!fun.isActive(i));
     TS_ASSERT(isFixed(fun, "IB64"));
     TS_ASSERT(isFixed(fun, "B65"));
     TS_ASSERT(isFixed(fun, "IB65"));
diff --git a/Framework/CurveFitting/test/Functions/CrystalFieldSpectrumTest.h b/Framework/CurveFitting/test/Functions/CrystalFieldSpectrumTest.h
index abd7a8ae4ca724728a458eb254a483aaa208ba94..69c276617c77e4ad2c9d5fdcb940ffb32a936fed 100644
--- a/Framework/CurveFitting/test/Functions/CrystalFieldSpectrumTest.h
+++ b/Framework/CurveFitting/test/Functions/CrystalFieldSpectrumTest.h
@@ -214,15 +214,15 @@ public:
     auto constraint = fun->getConstraint(i);
     TS_ASSERT(constraint);
     if (constraint) {
-      TS_ASSERT_EQUALS(constraint->asString(), "1.3<FWHM");
-      TS_ASSERT_EQUALS(constraint->getIndex(), 2);
+      TS_ASSERT_EQUALS(constraint->asString(), "1.3<f1.FWHM");
+      TS_ASSERT_EQUALS(constraint->getLocalIndex(), 39);
     }
     i = fun->parameterIndex("B44");
     constraint = fun->getConstraint(i);
     TS_ASSERT(constraint);
     if (constraint) {
       TS_ASSERT_EQUALS(constraint->asString(), "0<B44<10");
-      TS_ASSERT_EQUALS(constraint->getIndex(), 13);
+      TS_ASSERT_EQUALS(constraint->getLocalIndex(), 13);
     }
   }
 
@@ -682,6 +682,165 @@ public:
     }
   }
 
+  void test_new_peaks() {
+    std::string funDef = "name=CrystalFieldSpectrum,Ion=Ce,Symmetry=C2v,"
+                         "Temperature=44.0,FWHM=1.1";
+    auto fun = FunctionFactory::Instance().createInitialized(funDef);
+    TS_ASSERT_EQUALS(fun->nParams(), 40);
+    TS_ASSERT_DELTA(fun->getParameter(34), 310.38, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(35), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(36), 1.10, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(37), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(38), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(39), 1.10, 1e-2);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+
+    fun->setParameter("B20", 0.37737);
+    fun->setParameter("B22", 3.977);
+    fun->setParameter("B40", 0.031787);
+    fun->setParameter("B42", -0.11611);
+
+    TS_ASSERT_EQUALS(fun->nParams(), 49);
+    TS_ASSERT_DELTA(fun->getParameter(34), 203.87, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(35), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(36), 1.10, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(37), 86.29, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(38), 27.04, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(39), 1.10, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(40), 20.08, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(41), 44.24, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(42), 1.1, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(43), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(44), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(45), 1.1, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(46), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(47), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(48), 1.1, 1e-2);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(fun->isActive(39));
+    TS_ASSERT(fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+
+    fun->setParameter("B20", 0);
+    fun->setParameter("B22", 0);
+    fun->setParameter("B40", 0);
+    fun->setParameter("B42", 0);
+
+    TS_ASSERT_EQUALS(fun->nParams(), 49);
+    TS_ASSERT_DELTA(fun->getParameter(34), 310.38, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(35), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(36), 1.10, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(37), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(38), 27.04, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(39), 1.10, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(40), 0.0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(41), 44.24, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(42), 1.1, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(43), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(44), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(45), 1.1, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(46), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(47), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(48), 1.1, 1e-2);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(!fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+  }
+
+  void test_new_peaks_fixed_peak_width() {
+    std::string funDef = "name=CrystalFieldSpectrum,Ion=Ce,Symmetry=C2v,"
+                         "Temperature=44.0,FWHM=1.1";
+    auto fun = FunctionFactory::Instance().createInitialized(funDef);
+    TS_ASSERT_EQUALS(fun->nParams(), 40);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+
+    fun->setParameter("B20", 0.37737);
+    fun->setParameter("B22", 3.977);
+    fun->setParameter("B40", 0.031787);
+    fun->setParameter("B42", -0.11611);
+
+    fun->fix(39);
+
+    TS_ASSERT_EQUALS(fun->nParams(), 49);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+
+    fun->setParameter("B20", 0);
+    fun->setParameter("B22", 0);
+    fun->setParameter("B40", 0);
+    fun->setParameter("B42", 0);
+
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(!fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+
+    fun->setParameter("B20", 0.37737);
+    fun->setParameter("B22", 3.977);
+    fun->setParameter("B40", 0.031787);
+    fun->setParameter("B42", -0.11611);
+
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+  }
+
+  void test_new_peaks_tied_peak_width() {
+    std::string funDef = "name=CrystalFieldSpectrum,Ion=Ce,Symmetry=C2v,"
+                         "Temperature=44.0,FWHM=1.1";
+    auto fun = FunctionFactory::Instance().createInitialized(funDef);
+    TS_ASSERT_EQUALS(fun->nParams(), 40);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+
+    fun->setParameter("B20", 0.37737);
+    fun->setParameter("B22", 3.977);
+    fun->setParameter("B40", 0.031787);
+    fun->setParameter("B42", -0.11611);
+
+    fun->tie("f1.FWHM", "f0.FWHM");
+
+    TS_ASSERT_EQUALS(fun->nParams(), 49);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+
+    fun->setParameter("B20", 0);
+    fun->setParameter("B22", 0);
+    fun->setParameter("B40", 0);
+    fun->setParameter("B42", 0);
+
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(!fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+
+    fun->setParameter("B20", 0.37737);
+    fun->setParameter("B22", 3.977);
+    fun->setParameter("B40", 0.031787);
+    fun->setParameter("B42", -0.11611);
+
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+  }
+
 private:
   std::pair<double, double> getBounds(API::IFunction &fun,
                                       const std::string &parName) {
diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadILLIndirect.h b/Framework/DataHandling/inc/MantidDataHandling/LoadILLIndirect.h
index f8a9036c5344a3779bb19bd50384804a89995d5f..88d4e811d22aa040b97b890812c51bf64e669f8e 100644
--- a/Framework/DataHandling/inc/MantidDataHandling/LoadILLIndirect.h
+++ b/Framework/DataHandling/inc/MantidDataHandling/LoadILLIndirect.h
@@ -2,6 +2,7 @@
 #define MANTID_DATAHANDLING_LOADILLINDIRECT_H_
 
 #include "MantidAPI/IFileLoader.h"
+#include "MantidAPI/DeprecatedAlgorithm.h"
 #include "MantidNexus/NexusClasses.h"
 #include "MantidDataHandling/LoadHelper.h"
 
@@ -33,7 +34,8 @@ namespace DataHandling {
   Code Documentation is available at: <http://doxygen.mantidproject.org>
 */
 class DLLExport LoadILLIndirect
-    : public API::IFileLoader<Kernel::NexusDescriptor> {
+    : public API::IFileLoader<Kernel::NexusDescriptor>,
+      public API::DeprecatedAlgorithm {
 public:
   LoadILLIndirect();
   /// Returns a confidence value that this algorithm can load a file
diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadILLTOF2.h b/Framework/DataHandling/inc/MantidDataHandling/LoadILLTOF2.h
index ca7c8b2f328ed1e4df75dcd6f9878e4a9c812ea7..30ad9480441c387a90d401036465d13770d2b027 100644
--- a/Framework/DataHandling/inc/MantidDataHandling/LoadILLTOF2.h
+++ b/Framework/DataHandling/inc/MantidDataHandling/LoadILLTOF2.h
@@ -74,7 +74,7 @@ private:
                                 const std::vector<std::vector<int>> &);
   void loadSpectra(size_t &spec, const size_t numberOfTubes,
                    const std::vector<Mantid::detid_t> &detectorIDs,
-                   NeXus::NXInt data, Mantid::API::Progress progress);
+                   const NeXus::NXInt &data, Mantid::API::Progress &progress);
 
   void runLoadInstrument();
 
diff --git a/Framework/DataHandling/src/LoadHelper.cpp b/Framework/DataHandling/src/LoadHelper.cpp
index f87b80eeb4194953f14e2ee6d50800acf80e14a6..f5b7ede1fe357615f5d80ad667ea5fcd709b4b64 100644
--- a/Framework/DataHandling/src/LoadHelper.cpp
+++ b/Framework/DataHandling/src/LoadHelper.cpp
@@ -205,17 +205,21 @@ void LoadHelper::recurseAndAddNexusFieldsToWsRun(NXhandle nxfileID,
 
       NXstatus opengroup_status;
       NXstatus opendata_status;
+      NXstatus getinfo_status;
 
       if ((opengroup_status = NXopengroup(nxfileID, nxname, nxclass)) ==
           NX_OK) {
 
-        // Go down to one level
-        std::string p_nxname(
-            nxname); // current names can be useful for next level
-        std::string p_nxclass(nxclass);
+        if (std::string(nxclass) != "ILL_data_scan_vars") {
 
-        recurseAndAddNexusFieldsToWsRun(nxfileID, runDetails, p_nxname,
-                                        p_nxclass, level + 1);
+          // Go down one level, if the group is of a class known to NeXus
+          // (i.e. not the ILL-specific ILL_data_scan_vars)
+          std::string p_nxname(
+              nxname); // current names can be useful for next level
+          std::string p_nxclass(nxclass);
+
+          recurseAndAddNexusFieldsToWsRun(nxfileID, runDetails, p_nxname,
+                                          p_nxclass, level + 1);
+        }
 
         NXclosegroup(nxfileID);
       } // if(NXopengroup
@@ -229,10 +233,9 @@ void LoadHelper::recurseAndAddNexusFieldsToWsRun(NXhandle nxfileID,
                         << nxname << ")\n";
           /* nothing */
         } else { // create a property
-          int rank;
-          int dims[4];
+          int rank = 0;
+          int dims[4] = {0, 0, 0, 0};
           int type;
-          dims[0] = dims[1] = dims[2] = dims[3] = 0;
 
           std::string property_name =
               (parent_name.empty() ? nxname : parent_name + "." + nxname);
@@ -241,133 +244,167 @@ void LoadHelper::recurseAndAddNexusFieldsToWsRun(NXhandle nxfileID,
                         << property_name << '\n';
 
           // Get the value
-          NXgetinfo(nxfileID, &rank, dims, &type);
+          if ((getinfo_status = NXgetinfo(nxfileID, &rank, dims, &type)) ==
+              NX_OK) {
+
+            g_log.debug() << indent_str << "Rank of " << property_name << " is "
+                          << rank << "\n" << indent_str << "Dimensions are "
+                          << dims[0] << ", " << dims[1] << ", " << dims[2]
+                          << ", " << dims[3] << "\n";
 
-          // Note, we choose to only build properties on small float arrays
-          // filter logic is below
-          bool build_small_float_array = false; // default
+            // Note, we choose to only build properties on small float arrays
+            // filter logic is below
+            bool build_small_float_array = false; // default
+            bool read_property = true;
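+            // Only scalar values, strings and small 1-D float arrays (at most
+            // 9 elements) become run properties; everything else is filtered
+            // out below by clearing read_property.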
 
-          if ((type == NX_FLOAT32) || (type == NX_FLOAT64)) {
-            if ((rank == 1) && (dims[0] <= 9)) {
-              build_small_float_array = true;
+            if ((type == NX_FLOAT32) || (type == NX_FLOAT64)) {
+              if ((rank == 1) && (dims[0] <= 9)) {
+                build_small_float_array = true;
+              } else {
+                g_log.debug() << indent_str
+                              << "ignored multi dimensional number "
+                                 "data with more than 9 elements "
+                              << property_name << '\n';
+                read_property = false;
+              }
+            } else if (type != NX_CHAR) {
+              if ((rank > 1) || (dims[0] > 1) || (dims[1] > 1) ||
+                  (dims[2] > 1) || (dims[3] > 1)) {
+                g_log.debug() << indent_str
+                              << "ignored non-scalar numeric data on "
+                              << property_name << '\n';
+                read_property = false;
+              }
             } else {
-              g_log.debug() << indent_str
-                            << "ignored multi dimension float data on "
-                            << property_name << '\n';
-            }
-          } else if (type != NX_CHAR) {
-            if ((rank != 1) || (dims[0] != 1) || (dims[1] != 1) ||
-                (dims[2] != 1) || (dims[3] != 1)) {
-              g_log.debug() << indent_str << "ignored multi dimension data on "
-                            << property_name << '\n';
+              if ((rank > 1) || (dims[1] > 1) || (dims[2] > 1) ||
+                  (dims[3] > 1)) {
+                g_log.debug() << indent_str << "ignored string array data on "
+                              << property_name << '\n';
+                read_property = false;
+              }
             }
-          }
 
-          void *dataBuffer;
-          NXmalloc(&dataBuffer, rank, dims, type);
+            if (read_property) {
 
-          if (NXgetdata(nxfileID, dataBuffer) != NX_OK) {
-            NXfree(&dataBuffer);
-            throw std::runtime_error("Cannot read data from NeXus file");
-          }
+              void *dataBuffer;
+              NXmalloc(&dataBuffer, rank, dims, type);
 
-          if (type == NX_CHAR) {
-            std::string property_value(
-                reinterpret_cast<const char *>(dataBuffer));
-            if (boost::algorithm::ends_with(property_name, "_time")) {
-              // That's a time value! Convert to Mantid standard
-              property_value = dateTimeInIsoFormat(property_value);
-            }
-            runDetails.addProperty(property_name, property_value);
-
-          } else if ((type == NX_FLOAT32) || (type == NX_FLOAT64) ||
-                     (type == NX_INT16) || (type == NX_INT32) ||
-                     (type == NX_UINT16)) {
-
-            // Look for "units"
-            NXstatus units_status;
-            char units_sbuf[NX_MAXNAMELEN];
-            int units_len = NX_MAXNAMELEN;
-            int units_type = NX_CHAR;
-
-            char unitsAttrName[] = "units";
-            units_status = NXgetattr(nxfileID, unitsAttrName, units_sbuf,
-                                     &units_len, &units_type);
-            if (units_status != NX_ERROR) {
-              g_log.debug() << indent_str << "[ " << property_name
-                            << " has unit " << units_sbuf << " ]\n";
-            }
+              if (NXgetdata(nxfileID, dataBuffer) == NX_OK) {
 
-            if ((type == NX_FLOAT32) || (type == NX_FLOAT64)) {
-              // Mantid numerical properties are double only.
-              double property_double_value = 0.0;
-
-              // Simple case, one value
-              if (dims[0] == 1) {
-                if (type == NX_FLOAT32) {
-                  property_double_value =
-                      *(reinterpret_cast<float *>(dataBuffer));
-                } else if (type == NX_FLOAT64) {
-                  property_double_value =
-                      *(reinterpret_cast<double *>(dataBuffer));
-                }
-                if (units_status != NX_ERROR)
-                  runDetails.addProperty(property_name, property_double_value,
-                                         std::string(units_sbuf));
-                else
-                  runDetails.addProperty(property_name, property_double_value);
-              } else if (build_small_float_array) {
-                // An array, converted to "name_index", with index < 10 (see
-                // test above)
-                for (int dim_index = 0; dim_index < dims[0]; dim_index++) {
-                  if (type == NX_FLOAT32) {
-                    property_double_value =
-                        (reinterpret_cast<float *>(dataBuffer))[dim_index];
-                  } else if (type == NX_FLOAT64) {
-                    property_double_value =
-                        (reinterpret_cast<double *>(dataBuffer))[dim_index];
+                if (type == NX_CHAR) {
+                  std::string property_value(
+                      reinterpret_cast<const char *>(dataBuffer));
+                  if (boost::algorithm::ends_with(property_name, "_time")) {
+                    // That's a time value! Convert to Mantid standard
+                    property_value = dateTimeInIsoFormat(property_value);
+                  }
+                  runDetails.addProperty(property_name, property_value);
+
+                } else if ((type == NX_FLOAT32) || (type == NX_FLOAT64) ||
+                           (type == NX_INT16) || (type == NX_INT32) ||
+                           (type == NX_UINT16)) {
+
+                  // Look for "units"
+                  NXstatus units_status;
+                  char units_sbuf[NX_MAXNAMELEN];
+                  int units_len = NX_MAXNAMELEN;
+                  int units_type = NX_CHAR;
+
+                  char unitsAttrName[] = "units";
+                  units_status = NXgetattr(nxfileID, unitsAttrName, units_sbuf,
+                                           &units_len, &units_type);
+                  if (units_status != NX_ERROR) {
+                    g_log.debug() << indent_str << "[ " << property_name
+                                  << " has unit " << units_sbuf << " ]\n";
                   }
-                  std::string indexed_property_name = property_name +
-                                                      std::string("_") +
-                                                      std::to_string(dim_index);
-                  if (units_status != NX_ERROR)
-                    runDetails.addProperty(indexed_property_name,
-                                           property_double_value,
-                                           std::string(units_sbuf));
-                  else
-                    runDetails.addProperty(indexed_property_name,
-                                           property_double_value);
-                }
-              }
 
-            } else {
-              // int case
-              int property_int_value = 0;
-              if (type == NX_INT16) {
-                property_int_value =
-                    *(reinterpret_cast<short int *>(dataBuffer));
-              } else if (type == NX_INT32) {
-                property_int_value = *(reinterpret_cast<int *>(dataBuffer));
-              } else if (type == NX_UINT16) {
-                property_int_value =
-                    *(reinterpret_cast<short unsigned int *>(dataBuffer));
+                  if ((type == NX_FLOAT32) || (type == NX_FLOAT64)) {
+                    // Mantid numerical properties are double only.
+                    double property_double_value = 0.0;
+
+                    // Simple case, one value
+                    if (dims[0] == 1) {
+                      if (type == NX_FLOAT32) {
+                        property_double_value =
+                            *(reinterpret_cast<float *>(dataBuffer));
+                      } else if (type == NX_FLOAT64) {
+                        property_double_value =
+                            *(reinterpret_cast<double *>(dataBuffer));
+                      }
+                      if (units_status != NX_ERROR)
+                        runDetails.addProperty(property_name,
+                                               property_double_value,
+                                               std::string(units_sbuf));
+                      else
+                        runDetails.addProperty(property_name,
+                                               property_double_value);
+                    } else if (build_small_float_array) {
+                      // An array, converted to "name_index", with index < 10
+                      // (see the test above)
+                      for (int dim_index = 0; dim_index < dims[0];
+                           dim_index++) {
+                        if (type == NX_FLOAT32) {
+                          property_double_value = (reinterpret_cast<float *>(
+                              dataBuffer))[dim_index];
+                        } else if (type == NX_FLOAT64) {
+                          property_double_value = (reinterpret_cast<double *>(
+                              dataBuffer))[dim_index];
+                        }
+                        std::string indexed_property_name =
+                            property_name + std::string("_") +
+                            std::to_string(dim_index);
+                        if (units_status != NX_ERROR)
+                          runDetails.addProperty(indexed_property_name,
+                                                 property_double_value,
+                                                 std::string(units_sbuf));
+                        else
+                          runDetails.addProperty(indexed_property_name,
+                                                 property_double_value);
+                      }
+                    }
+
+                  } else {
+                    // int case
+                    int property_int_value = 0;
+                    if (type == NX_INT16) {
+                      property_int_value =
+                          *(reinterpret_cast<short int *>(dataBuffer));
+                    } else if (type == NX_INT32) {
+                      property_int_value =
+                          *(reinterpret_cast<int *>(dataBuffer));
+                    } else if (type == NX_UINT16) {
+                      property_int_value =
+                          *(reinterpret_cast<short unsigned int *>(dataBuffer));
+                    }
+
+                    if (units_status != NX_ERROR)
+                      runDetails.addProperty(property_name, property_int_value,
+                                             std::string(units_sbuf));
+                    else
+                      runDetails.addProperty(property_name, property_int_value);
+
+                  } // if (type==...
+
+                } else {
+                  g_log.debug() << indent_str << "unexpected data on "
+                                << property_name << '\n';
+                } // test on nxdata type
+
+              } else {
+                g_log.debug() << indent_str << "could not read the value of "
+                              << property_name << '\n';
               }
 
-              if (units_status != NX_ERROR)
-                runDetails.addProperty(property_name, property_int_value,
-                                       std::string(units_sbuf));
-              else
-                runDetails.addProperty(property_name, property_int_value);
-
-            } // if (type==...
-
-          } else {
-            g_log.debug() << indent_str << "unexpected data on "
-                          << property_name << '\n';
-          } // test on nxdata type
+              NXfree(&dataBuffer);
+              dataBuffer = nullptr;
+            }
 
-          NXfree(&dataBuffer);
-          dataBuffer = nullptr;
+          } // if NXgetinfo OK
+          else {
+            g_log.debug() << indent_str << "unexpected status ("
+                          << getinfo_status << ") on " << nxname << '\n';
+          }
 
         } // if (parent_class == "NXData" || parent_class == "NXMonitor") else
 
diff --git a/Framework/DataHandling/src/LoadILLIndirect.cpp b/Framework/DataHandling/src/LoadILLIndirect.cpp
index 769f3491a7c9563515f9e3d0aa200d9087680b66..206e6a9342656e3cc9d4dd0e05c0b969e2f0e390 100644
--- a/Framework/DataHandling/src/LoadILLIndirect.cpp
+++ b/Framework/DataHandling/src/LoadILLIndirect.cpp
@@ -32,6 +32,8 @@ LoadILLIndirect::LoadILLIndirect()
       m_numberOfPixelsPerTube(0), m_numberOfChannels(0),
       m_numberOfSimpleDetectors(0), m_numberOfHistograms(0) {
   m_supportedInstruments.emplace_back("IN16B");
+  useAlgorithm("LoadILLIndirect", 2);
+  deprecatedDate("01.04.2017");
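+  // Flag version 1 of this loader as deprecated in favour of LoadILLIndirect
+  // version 2.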
 }
 
 //----------------------------------------------------------------------------------------------
diff --git a/Framework/DataHandling/src/LoadILLTOF2.cpp b/Framework/DataHandling/src/LoadILLTOF2.cpp
index 0ae774efd2026c43b9ad8b036b8b0741e8d55ec2..5ff8014cd7b65713faefffdae252011d216ecb20 100644
--- a/Framework/DataHandling/src/LoadILLTOF2.cpp
+++ b/Framework/DataHandling/src/LoadILLTOF2.cpp
@@ -404,7 +404,7 @@ void LoadILLTOF2::loadDataIntoTheWorkSpace(
  */
 void LoadILLTOF2::loadSpectra(size_t &spec, const size_t numberOfTubes,
                               const std::vector<detid_t> &detectorIDs,
-                              NXInt data, Progress progress) {
+                              const NXInt &data, Progress &progress) {
   for (size_t i = 0; i < numberOfTubes; ++i) {
     for (size_t j = 0; j < m_numberOfPixelsPerTube; ++j) {
       int *data_p = &data(static_cast<int>(i), static_cast<int>(j), 0);
diff --git a/Framework/Geometry/inc/MantidGeometry/Crystal/PointGroup.h b/Framework/Geometry/inc/MantidGeometry/Crystal/PointGroup.h
index 936cc621a2047bed32512d355d41e91e61b006d7..2032ca0603b9d75aed9522717e0e0445138f594a 100644
--- a/Framework/Geometry/inc/MantidGeometry/Crystal/PointGroup.h
+++ b/Framework/Geometry/inc/MantidGeometry/Crystal/PointGroup.h
@@ -63,7 +63,7 @@ public:
   Kernel::V3D getReflectionFamily(const Kernel::V3D &hkl) const;
 
 protected:
-  std::vector<Kernel::V3D> getEquivalentSet(const Kernel::V3D &hkl) const;
+  std::vector<Kernel::V3D> getAllEquivalents(const Kernel::V3D &hkl) const;
 
   CrystalSystem getCrystalSystemFromGroup() const;
   LatticeSystem getLatticeSystemFromCrystalSystemAndGroup(
diff --git a/Framework/Geometry/inc/MantidGeometry/Crystal/ReflectionCondition.h b/Framework/Geometry/inc/MantidGeometry/Crystal/ReflectionCondition.h
index 80e26f9a58149275abfd84643adf23d0ecec563e..3eb01e35222b08a2e55dc6298bac78ac20214849 100644
--- a/Framework/Geometry/inc/MantidGeometry/Crystal/ReflectionCondition.h
+++ b/Framework/Geometry/inc/MantidGeometry/Crystal/ReflectionCondition.h
@@ -172,6 +172,12 @@ typedef boost::shared_ptr<ReflectionCondition> ReflectionCondition_sptr;
 
 MANTID_GEOMETRY_DLL std::vector<ReflectionCondition_sptr>
 getAllReflectionConditions();
+MANTID_GEOMETRY_DLL std::vector<std::string> getAllReflectionConditionNames();
+MANTID_GEOMETRY_DLL std::vector<std::string> getAllReflectionConditionSymbols();
+MANTID_GEOMETRY_DLL ReflectionCondition_sptr
+getReflectionConditionByName(const std::string &name);
+MANTID_GEOMETRY_DLL ReflectionCondition_sptr
+getReflectionConditionBySymbol(const std::string &symbol);
 
 } // namespace Mantid
 } // namespace Geometry
diff --git a/Framework/Geometry/src/Crystal/PointGroup.cpp b/Framework/Geometry/src/Crystal/PointGroup.cpp
index cb22e404cbbe09b3b9ebca272ae4152328d567ff..dedfda48c6dd29eabe32f511b86593ab09eb017c 100644
--- a/Framework/Geometry/src/Crystal/PointGroup.cpp
+++ b/Framework/Geometry/src/Crystal/PointGroup.cpp
@@ -31,7 +31,14 @@ using Kernel::IntMatrix;
  * @return :: std::vector containing all equivalent hkls.
  */
 std::vector<V3D> PointGroup::getEquivalents(const V3D &hkl) const {
-  return getEquivalentSet(hkl);
+  auto equivalents = getAllEquivalents(hkl);
+
+  std::sort(equivalents.begin(), equivalents.end(), std::greater<V3D>());
+
+  equivalents.erase(std::unique(equivalents.begin(), equivalents.end()),
+                    equivalents.end());
+
+  return equivalents;
 }
 
 /**
@@ -48,7 +55,9 @@ std::vector<V3D> PointGroup::getEquivalents(const V3D &hkl) const {
  * @return :: hkl specific to a family of index-triplets
  */
 V3D PointGroup::getReflectionFamily(const Kernel::V3D &hkl) const {
-  return *getEquivalentSet(hkl).begin();
+  auto equivalents = getAllEquivalents(hkl);
+
+  return *std::max_element(equivalents.begin(), equivalents.end());
 }
 
 /// Protected constructor - can not be used directly.
@@ -65,9 +74,9 @@ std::string PointGroup::getSymbol() const { return m_symbolHM; }
 
 bool PointGroup::isEquivalent(const Kernel::V3D &hkl,
                               const Kernel::V3D &hkl2) const {
-  std::vector<V3D> hklEquivalents = getEquivalentSet(hkl);
+  auto hklEquivalents = getAllEquivalents(hkl);
 
-  return (std::find(hklEquivalents.begin(), hklEquivalents.end(), hkl2) !=
+  return (std::find(hklEquivalents.cbegin(), hklEquivalents.cend(), hkl2) !=
           hklEquivalents.end());
 }
 
@@ -75,29 +84,24 @@ bool PointGroup::isEquivalent(const Kernel::V3D &hkl,
  * Generates a set of hkls
  *
  * This method applies all transformation matrices to the supplied hkl and puts
- * it into a set, which is returned in the end. Using a set ensures that each
- * hkl occurs once and only once. This set is the set of equivalent hkls,
- * specific to a concrete point group.
+ * them into a vector, which is returned in the end. For special reflections
+ * such as 100 or 110 or 111, the vector may contain duplicates that need to
+ * be filtered out.
  *
  * The symmetry operations need to be set prior to calling this method by a call
  * to PointGroup::setTransformationMatrices.
  *
  * @param hkl :: Arbitrary hkl
- * @return :: set of hkls.
+ * @return :: vector of hkls.
  */
-std::vector<V3D> PointGroup::getEquivalentSet(const Kernel::V3D &hkl) const {
+std::vector<V3D> PointGroup::getAllEquivalents(const Kernel::V3D &hkl) const {
   std::vector<V3D> equivalents;
   equivalents.reserve(m_allOperations.size());
 
   for (const auto &operation : m_allOperations) {
-    equivalents.push_back(operation.transformHKL(hkl));
+    equivalents.emplace_back(operation.transformHKL(hkl));
   }
 
-  std::sort(equivalents.begin(), equivalents.end(), std::greater<V3D>());
-
-  equivalents.erase(std::unique(equivalents.begin(), equivalents.end()),
-                    equivalents.end());
-
   return equivalents;
 }
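
With this refactoring, getAllEquivalents returns the raw list of transformed hkls and the sort/unique de-duplication happens once in getEquivalents. A minimal standalone sketch of that de-duplication step, using int as a stand-in for Kernel::V3D:

```cpp
#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

int main() {
  // Duplicates appear for special reflections such as (1,0,0); sort in
  // descending order, then erase the adjacent duplicates.
  std::vector<int> equivalents = {1, -1, 1, 0, -1};

  std::sort(equivalents.begin(), equivalents.end(), std::greater<int>());
  equivalents.erase(std::unique(equivalents.begin(), equivalents.end()),
                    equivalents.end());

  for (int hkl : equivalents)
    std::cout << hkl << ' '; // prints: 1 0 -1
}
```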
 
diff --git a/Framework/Geometry/src/Crystal/ReflectionCondition.cpp b/Framework/Geometry/src/Crystal/ReflectionCondition.cpp
index eb9001c92a8c181e0ad316edc59a0a137d3c1037..dc787b9596593929d9a7ce22794c5ef6a1d85cad 100644
--- a/Framework/Geometry/src/Crystal/ReflectionCondition.cpp
+++ b/Framework/Geometry/src/Crystal/ReflectionCondition.cpp
@@ -1,5 +1,6 @@
 #include "MantidGeometry/Crystal/ReflectionCondition.h"
 #include "MantidKernel/System.h"
+#include <algorithm>
 
 namespace Mantid {
 namespace Geometry {
@@ -26,5 +27,82 @@ std::vector<ReflectionCondition_sptr> getAllReflectionConditions() {
   return out;
 }
 
+/// Helper function that transforms all ReflectionConditions to strings.
+std::vector<std::string> transformReflectionConditions(
+    const std::function<std::string(const ReflectionCondition_sptr &)> &fn) {
+  auto conditions = getAllReflectionConditions();
+
+  std::vector<std::string> names;
+  std::transform(conditions.cbegin(), conditions.cend(),
+                 std::back_inserter(names), fn);
+
+  return names;
+}
+
+/// Returns all ReflectionCondition names.
+std::vector<std::string> getAllReflectionConditionNames() {
+  return transformReflectionConditions(
+      [](const ReflectionCondition_sptr &condition) {
+        return condition->getName();
+      });
+}
+
+/// Returns all centering symbols.
+std::vector<std::string> getAllReflectionConditionSymbols() {
+  return transformReflectionConditions(
+      [](const ReflectionCondition_sptr &condition) {
+        return condition->getSymbol();
+      });
+}
+
+/**
+ * @brief Returns a reflection condition according to a filter function
+ *
+ * This small helper function returns a ReflectionCondition_sptr for which
+ * the supplied function returns true. If no ReflectionCondition is found,
+ * an std::invalid_argument exception is thrown. The message of the exception
+ * contains the hint-parameter, which could be string that was used as a
+ * matching criterion to find the ReflectionCondition.
+ *
+ * @param fn :: Unary predicate for matching ReflectionCondition
+ * @param hint :: Hint to include in exception message. Name or symbol.
+ * @return ReflectionCondition for which fn matches.
+ */
+ReflectionCondition_sptr getReflectionConditionWhere(
+    const std::function<bool(const ReflectionCondition_sptr &)> &fn,
+    const std::string &hint) {
+  auto conditions = getAllReflectionConditions();
+
+  auto it = std::find_if(conditions.cbegin(), conditions.cend(), fn);
+
+  if (it == conditions.cend()) {
+    throw std::invalid_argument("No ReflectionCondition found that matches '" +
+                                hint + "'.");
+  }
+
+  return *it;
+}
+
+/// Returns the requested ReflectionCondition, see
+/// getAllReflectionConditionNames for possible names.
+ReflectionCondition_sptr getReflectionConditionByName(const std::string &name) {
+  return getReflectionConditionWhere(
+      [=](const ReflectionCondition_sptr &condition) {
+        return condition->getName() == name;
+      },
+      name);
+}
+
+/// Returns the ReflectionCondition for the specified centering symbol, see
+/// getAllReflectionConditionSymbols for possible symbols.
+ReflectionCondition_sptr
+getReflectionConditionBySymbol(const std::string &symbol) {
+  return getReflectionConditionWhere(
+      [=](const ReflectionCondition_sptr &condition) {
+        return condition->getSymbol() == symbol;
+      },
+      symbol);
+}
+
 } // namespace Mantid
 } // namespace Geometry
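
A brief usage sketch for the lookup helpers added above (illustrative only; it assumes the Mantid Geometry library is linked and that "P" is among the registered centering symbols, as the existing tests suggest):

```cpp
#include "MantidGeometry/Crystal/ReflectionCondition.h"
#include <iostream>
#include <stdexcept>

using namespace Mantid::Geometry;

int main() {
  // List the registered centering symbols, then look one of them up.
  for (const auto &symbol : getAllReflectionConditionSymbols())
    std::cout << symbol << '\n';

  ReflectionCondition_sptr primitive = getReflectionConditionBySymbol("P");
  std::cout << primitive->getName() << '\n';

  // Unknown names raise std::invalid_argument with the hint in the message.
  try {
    getReflectionConditionByName("not-a-centering");
  } catch (const std::invalid_argument &e) {
    std::cout << e.what() << '\n';
  }
}
```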
diff --git a/Framework/Geometry/src/Instrument/Detector.cpp b/Framework/Geometry/src/Instrument/Detector.cpp
index dab28633c3cb5dc6097f0e94048ee5e5f33d34e3..48fb57c03c3cba31097841c7fad0da714f9279e3 100644
--- a/Framework/Geometry/src/Instrument/Detector.cpp
+++ b/Framework/Geometry/src/Instrument/Detector.cpp
@@ -92,13 +92,16 @@ double Detector::getSignedTwoTheta(const V3D &observer, const V3D &axis,
   return angle;
 }
 
-/// Get the phi angle between the detector with reference to the origin
-///@return The angle
+/** Get the phi angle of the detector with reference to the origin.
+ * This function will not be supported in Instrument-2.0 due to its ambiguity.
+ * DO NOT USE IN NEW CODE
+ * @return The angle
+ */
 double Detector::getPhi() const {
-  double phi = 0.0, dummy;
-  this->getPos().getSpherical(dummy, dummy, phi);
-  return phi * M_PI / 180.0;
+  const Kernel::V3D pos = this->getPos();
+  return std::atan2(pos[1], pos[0]);
 }
+
 /**
  * Calculate the phi angle between detector and beam, and then offset.
  * @param offset in radians
diff --git a/Framework/Geometry/test/ReflectionConditionTest.h b/Framework/Geometry/test/ReflectionConditionTest.h
index 58875720487a3e13c50dd7a87cd2d7e6722f9e97..c8841ae0fee41586c4868b1a3a63d6d9acdd669d 100644
--- a/Framework/Geometry/test/ReflectionConditionTest.h
+++ b/Framework/Geometry/test/ReflectionConditionTest.h
@@ -68,6 +68,54 @@ public:
     // All centering symbols are present if the set is empty.
     TS_ASSERT_EQUALS(centeringSymbols.size(), 0);
   }
+
+  void test_getReflectionConditionNames() {
+    auto conditions = getAllReflectionConditions();
+    auto names = getAllReflectionConditionNames();
+
+    TS_ASSERT_EQUALS(conditions.size(), names.size());
+
+    // there should not be any duplicates in the names
+    std::unordered_set<std::string> nameSet(names.begin(), names.end());
+
+    TS_ASSERT_EQUALS(nameSet.size(), names.size())
+  }
+
+  void test_getReflectionConditionSymbols() {
+    auto conditions = getAllReflectionConditions();
+    auto symbols = getAllReflectionConditionSymbols();
+
+    TS_ASSERT_EQUALS(conditions.size(), symbols.size());
+
+    // there should not be any duplicates in the symbols
+    std::unordered_set<std::string> symbolSet(symbols.begin(), symbols.end());
+
+    TS_ASSERT_EQUALS(symbolSet.size(), symbols.size())
+  }
+
+  void test_getReflectionConditionByName() {
+    auto names = getAllReflectionConditionNames();
+
+    for (auto name : names) {
+      TSM_ASSERT_THROWS_NOTHING("Problem with ReflectionCondition: " + name,
+                                getReflectionConditionByName(name));
+    }
+
+    TS_ASSERT_THROWS(getReflectionConditionByName("invalid"),
+                     std::invalid_argument);
+  }
+
+  void test_getReflectionConditionBySymbol() {
+    auto symbols = getAllReflectionConditionSymbols();
+
+    for (auto symbol : symbols) {
+      TSM_ASSERT_THROWS_NOTHING("Problem with ReflectionCondition: " + symbol,
+                                getReflectionConditionBySymbol(symbol));
+    }
+
+    TS_ASSERT_THROWS(getReflectionConditionBySymbol("Q"),
+                     std::invalid_argument);
+  }
 };
 
 #endif /* MANTID_GEOMETRY_REFLECTIONCONDITIONTEST_H_ */
diff --git a/Framework/Kernel/CMakeLists.txt b/Framework/Kernel/CMakeLists.txt
index 12b62e27fb4911a45ef790318ed8230eb6c0ed8d..e7231730b93505f501c7d9d16d96687018db5f3d 100644
--- a/Framework/Kernel/CMakeLists.txt
+++ b/Framework/Kernel/CMakeLists.txt
@@ -236,6 +236,7 @@ set ( INC_FILES
 	inc/MantidKernel/NDPseudoRandomNumberGenerator.h
 	inc/MantidKernel/NDRandomNumberGenerator.h
 	inc/MantidKernel/NetworkProxy.h
+        inc/MantidKernel/NearestNeighbours.h
 	inc/MantidKernel/NeutronAtom.h
 	inc/MantidKernel/NexusDescriptor.h
 	inc/MantidKernel/NormalDistribution.h
@@ -388,6 +389,7 @@ set ( TEST_FILES
 	MutexTest.h
 	NDPseudoRandomNumberGeneratorTest.h
 	NDRandomNumberGeneratorTest.h
+        NearestNeighboursTest.h
 	NeutronAtomTest.h
 	NexusDescriptorTest.h
 	NormalDistributionTest.h
diff --git a/Framework/Kernel/inc/MantidKernel/Exception.h b/Framework/Kernel/inc/MantidKernel/Exception.h
index 448f840938e5557997b3c7b864e812b7aebf6ff3..369597d468aea5a6c2ec5799c7e3cb76eb7350db 100644
--- a/Framework/Kernel/inc/MantidKernel/Exception.h
+++ b/Framework/Kernel/inc/MantidKernel/Exception.h
@@ -4,9 +4,9 @@
 //----------------------------------------------------------------------
 // Includes
 //----------------------------------------------------------------------
+#include "MantidKernel/DllConfig.h"
 #include <stdexcept>
 #include <string>
-#include "MantidKernel/DllConfig.h"
 
 namespace Mantid {
 namespace Kernel {
@@ -347,6 +347,17 @@ public:
   const int &errorCode() const;
 };
 
+/// Exception thrown when a fitting function changes the number of parameters
+/// during a fit.
+class MANTID_KERNEL_DLL FitSizeWarning final : public std::exception {
+  std::string m_message;
+
+public:
+  explicit FitSizeWarning(size_t oldSize);
+  FitSizeWarning(size_t oldSize, size_t newSize);
+  const char *what() const noexcept override;
+};
+
 } // namespace Exception
 } // namespace Kernel
 } // namespace Mantid
diff --git a/Framework/Kernel/inc/MantidKernel/MultiThreaded.h b/Framework/Kernel/inc/MantidKernel/MultiThreaded.h
index 559214a637336fc106092a910743c8531e1870a5..8a9c9c50591cee8203cba00ee1a1c1079d4d1dac 100644
--- a/Framework/Kernel/inc/MantidKernel/MultiThreaded.h
+++ b/Framework/Kernel/inc/MantidKernel/MultiThreaded.h
@@ -59,6 +59,12 @@ threadSafe(const Arg &workspace, Args &&... others) {
   return workspace.threadSafe() && threadSafe(std::forward<Args>(others)...);
 }
 
+/** Uses std::compare_exchange_weak to update the atomic value f = op(f, d)
+ * Used to improve parallel scaling in algorithms MDNormDirectSC and MDNormSCD
+ * @param f atomic variable being updated
+ * @param d second element in binary operation
+ * @param op binary operation on elements f and d
+ */
 template <typename T, typename BinaryOp>
 void AtomicOp(std::atomic<T> &f, T d, BinaryOp op) {
   T old = f.load();
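
The new comment documents the compare-and-swap update f = op(f, d); since the hunk is truncated here, the following is a self-contained sketch of how such a loop is typically completed (an assumption for illustration, not necessarily the exact Mantid body):

```cpp
#include <atomic>
#include <functional>
#include <iostream>

// Retry compare_exchange_weak until op(old, d) is stored without another
// thread having changed the value in between.
template <typename T, typename BinaryOp>
void atomicOpSketch(std::atomic<T> &f, T d, BinaryOp op) {
  T old = f.load();
  T desired = op(old, d);
  while (!f.compare_exchange_weak(old, desired))
    desired = op(old, d); // 'old' was refreshed by the failed exchange
}

int main() {
  std::atomic<double> signal{1.0};
  atomicOpSketch(signal, 2.5, std::plus<double>());
  std::cout << signal.load() << '\n'; // 3.5
}
```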
diff --git a/Framework/Kernel/inc/MantidKernel/NearestNeighbours.h b/Framework/Kernel/inc/MantidKernel/NearestNeighbours.h
new file mode 100644
index 0000000000000000000000000000000000000000..39c823e956eeb1d0bd6a638f957fe78e9e95f1cd
--- /dev/null
+++ b/Framework/Kernel/inc/MantidKernel/NearestNeighbours.h
@@ -0,0 +1,196 @@
+#ifndef MANTID_KERNEL_NEARESTNEIGHBOURS_H_
+#define MANTID_KERNEL_NEARESTNEIGHBOURS_H_
+
+#include "MantidKernel/DllConfig.h"
+#include "MantidKernel/ANN/ANN.h"
+#include "MantidKernel/make_unique.h"
+
+#include <Eigen/Core>
+#include <vector>
+
+/**
+  NearestNeighbours is a thin wrapper class around the ANN library for finding
+  the k nearest neighbours.
+
+  Given a vector of Eigen::Vectors this class will generate a KDTree. The tree
+  can then be interrogated to find the closest k neighbours to a given position.
+
+  This class is templated with a parameter N which defines the dimensionality
+  of the vector type used, i.e. if N = 3 then Eigen::Vector3d is used.
+
+  @author Samuel Jackson
+  @date 2017
+
+  Copyright &copy; 2016 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+
+namespace Mantid {
+namespace Kernel {
+
+//------------------------------------------------------------------------------
+// Helper classes
+//------------------------------------------------------------------------------
+
+/**
+ * NNDataPoints is a thin RAII wrapper class around the ANNpointArray type. This
+ * takes care of the proper allocation and deallocation of memory.
+ */
+class NNDataPoints {
+public:
+  /** Construct a new set of data points
+   *
+   * @param nPts :: the number of data points
+   * @param nElems :: the number of elements for each point
+   */
+  NNDataPoints(const int nPts, const int nElems) : m_nPts(nPts) {
+    m_data = annAllocPts(m_nPts, nElems);
+  }
+
+  ~NNDataPoints() { annDeallocPts(m_data); }
+
+  /** Return a handle to the raw ANNpointArray wrapped by this class
+   *
+   * @return handle to the raw ANNpointArray
+   */
+  ANNpointArray rawData() { return m_data; }
+
+  /** Access a raw point in the collection of points
+   *
+   * This will check the index used is within bounds and return nullptr if
+   * outside of those bounds
+   *
+   * @param i :: the index of the point to return a handle to
+   * @return handle to a single point in the collection of points
+   */
+  ANNcoord *mutablePoint(const int i) {
+    if (i < m_nPts)
+      return m_data[i];
+    else
+      return nullptr;
+  }
+
+private:
+  /// Number of points stored
+  const int m_nPts;
+  /// Array of points for use with NN search
+  ANNpointArray m_data;
+};
+
+//------------------------------------------------------------------------------
+// NearestNeighbours implementation
+//------------------------------------------------------------------------------
+
+template <size_t N = 3> class DLLExport NearestNeighbours {
+
+public:
+  // typedefs for code brevity
+  typedef Eigen::Matrix<double, N, 1> VectorType;
+  typedef std::vector<std::tuple<VectorType, size_t, double>>
+      NearestNeighbourResults;
+
+  /** Create a nearest neighbour search object
+   *
+   * @param points :: vector of Eigen::Vectors to search through
+   */
+  NearestNeighbours(const std::vector<VectorType> &points) {
+    const auto numPoints = static_cast<int>(points.size());
+    if (numPoints == 0)
+      throw std::runtime_error(
+          "Need at least one point to initialise NearestNeighbours.");
+
+    m_dataPoints = make_unique<NNDataPoints>(numPoints, static_cast<int>(N));
+
+    for (size_t i = 0; i < points.size(); ++i) {
+      Eigen::Map<VectorType>(m_dataPoints->mutablePoint(static_cast<int>(i)), N,
+                             1) = points[i];
+    }
+    m_kdTree = make_unique<ANNkd_tree>(m_dataPoints->rawData(), numPoints,
+                                       static_cast<int>(N));
+  }
+
+  ~NearestNeighbours() { annClose(); }
+
+  NearestNeighbours(const NearestNeighbours &) = delete;
+
+  /** Find the k nearest neighbours to a given point
+   *
+   * This is a thin wrapper around the ANN library annkSearch method
+   *
+   * @param pos :: the position to find the k nearest neighbours of
+   * @param k :: the number of neighbours to find
+   * @param error :: error term for finding approximate nearest neighbours.
+   *   If zero, exact neighbours will be found (default = 0.0).
+   * @return vector of neighbours as tuples of (position, index, distance)
+   */
+  NearestNeighbourResults findNearest(const VectorType &pos, const size_t k = 1,
+                                      const double error = 0.0) {
+    const auto numNeighbours = static_cast<int>(k);
+    // create arrays to store the indices & distances of nearest neighbours
+    auto nnIndexList = std::unique_ptr<ANNidx[]>(new ANNidx[numNeighbours]);
+    auto nnDistList = std::unique_ptr<ANNdist[]>(new ANNdist[numNeighbours]);
+
+    // create ANNpoint from Eigen array
+    auto point = std::unique_ptr<ANNcoord[]>(annAllocPt(N));
+    Eigen::Map<VectorType>(point.get(), N, 1) = pos;
+
+    // find the k nearest neighbours
+    m_kdTree->annkSearch(point.get(), numNeighbours, nnIndexList.get(),
+                         nnDistList.get(), error);
+
+    return makeResults(k, std::move(nnIndexList), std::move(nnDistList));
+  }
+
+private:
+  /** Helper function to create an instance of NearestNeighbourResults
+   *
+   * @param k :: the number of neighbours searched for
+   * @param nnIndexList :: the ordered list of indices matching the closest k
+   * neighbours
+   * @param nnDistList :: the ordered list of distances matching the closest k
+   * neighbours
+   * @return a new NearestNeighbourResults object from the found items
+   */
+  NearestNeighbourResults
+  makeResults(const size_t k, const std::unique_ptr<ANNidx[]> nnIndexList,
+              const std::unique_ptr<ANNdist[]> nnDistList) {
+    NearestNeighbourResults results;
+    results.reserve(k);
+
+    for (size_t i = 0; i < k; ++i) {
+      // create Eigen array from ANNpoint
+      auto pos = m_dataPoints->mutablePoint(nnIndexList[i]);
+      VectorType point = Eigen::Map<VectorType>(pos, N, 1);
+      results.emplace_back(point, nnIndexList[i], nnDistList[i]);
+    }
+
+    return results;
+  }
+
+  /// handle to the list of data points to search through
+  std::unique_ptr<NNDataPoints> m_dataPoints;
+  /// handle to the ANN KD-tree used for searching
+  std::unique_ptr<ANNkd_tree> m_kdTree;
+};
+}
+}
+
+#endif
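
A short usage sketch for the new header (illustrative; assumes the ANN and Eigen dependencies are available as in the Kernel build). The reported distances come straight from ANN, i.e. squared Euclidean distances, which the test added below also relies on:

```cpp
#include "MantidKernel/NearestNeighbours.h"
#include <iostream>

using Mantid::Kernel::NearestNeighbours;

int main() {
  // Build a 3D search tree from a handful of points.
  std::vector<Eigen::Vector3d> points = {Eigen::Vector3d(0, 0, 0),
                                         Eigen::Vector3d(1, 0, 0),
                                         Eigen::Vector3d(0, 2, 0)};
  NearestNeighbours<3> nn(points);

  // Find the two neighbours closest to (0.9, 0.1, 0).
  auto results = nn.findNearest(Eigen::Vector3d(0.9, 0.1, 0), 2);
  for (const auto &neighbour : results) {
    std::cout << "index " << std::get<1>(neighbour) << ", squared distance "
              << std::get<2>(neighbour) << '\n';
  }
}
```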
diff --git a/Framework/Kernel/inc/MantidKernel/Unit.h b/Framework/Kernel/inc/MantidKernel/Unit.h
index 83a5ba8af90b342f9683e2befbaecb0ebf35cef6..a2d8f33fcd064af3a6c4740dfdfef67459824953 100644
--- a/Framework/Kernel/inc/MantidKernel/Unit.h
+++ b/Framework/Kernel/inc/MantidKernel/Unit.h
@@ -408,6 +408,33 @@ protected:
   double factorFrom; ///< Constant factor for from conversion
 };
 
+//=================================================================================================
+/// d-SpacingPerpendicular in Angstrom
+class MANTID_KERNEL_DLL dSpacingPerpendicular : public Unit {
+public:
+  const std::string unitID() const override; ///< "dSpacingPerpendicular"
+  const std::string caption() const override {
+    return "d-SpacingPerpendicular";
+  }
+  const UnitLabel label() const override;
+
+  double singleToTOF(const double x) const override;
+  double singleFromTOF(const double tof) const override;
+  void init() override;
+  Unit *clone() const override;
+  double conversionTOFMin() const override;
+  double conversionTOFMax() const override;
+
+  /// Constructor
+  dSpacingPerpendicular();
+
+protected:
+  double factorTo;   ///< Constant factor for to conversion
+  double sfpTo;      ///< Extra correction factor in to conversion
+  double factorFrom; ///< Constant factor for from conversion
+  double sfpFrom;    ///< Extra correction factor in from conversion
+};
+
 //=================================================================================================
 /// Momentum Transfer in Angstrom^-1
 class MANTID_KERNEL_DLL MomentumTransfer : public Unit {
diff --git a/Framework/Kernel/src/Exception.cpp b/Framework/Kernel/src/Exception.cpp
index 387951f26b3b89cde1c56a04af5f54581869a0cd..ec245c1586f0749463da02d2909ea11072f43b7f 100644
--- a/Framework/Kernel/src/Exception.cpp
+++ b/Framework/Kernel/src/Exception.cpp
@@ -1,5 +1,5 @@
-#include <sstream>
 #include "MantidKernel/Exception.h"
+#include <sstream>
 
 namespace Mantid {
 namespace Kernel {
@@ -353,6 +353,29 @@ const char *InternetError::what() const noexcept { return outMessage.c_str(); }
 */
 const int &InternetError::errorCode() const { return m_errorCode; }
 
+//-------------------------
+// FitSizeWarning class
+//-------------------------
+
+/// Constructor.
+/// @param oldSize :: Old number of free fitting parameters
+FitSizeWarning::FitSizeWarning(size_t oldSize)
+    : std::exception(),
+      m_message(
+          "Number of fitting parameters is different from original value of " +
+          std::to_string(oldSize)) {}
+
+/// Constructor.
+/// @param oldSize :: Old number of free fitting parameters
+/// @param newSize :: New number of free fitting parameters
+FitSizeWarning::FitSizeWarning(size_t oldSize, size_t newSize)
+    : std::exception(),
+      m_message("Number of fitting parameters changed from " +
+                std::to_string(oldSize) + " to " + std::to_string(newSize)) {}
+
+/// Get the warning message.
+const char *FitSizeWarning::what() const noexcept { return m_message.c_str(); }
+
 } // namespace Exception
 } // namespace Kernel
 } // namespace Mantid
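
A hedged usage sketch for the new warning exception (the throw site is hypothetical; only the class itself comes from this patch):

```cpp
#include "MantidKernel/Exception.h"
#include <iostream>

using Mantid::Kernel::Exception::FitSizeWarning;

int main() {
  try {
    // Hypothetical: a fit function reports that its free-parameter count
    // changed from 4 to 6 during the fit.
    throw FitSizeWarning(4, 6);
  } catch (const FitSizeWarning &warning) {
    std::cout << warning.what() << '\n';
    // prints: Number of fitting parameters changed from 4 to 6
  }
}
```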
diff --git a/Framework/Kernel/src/NexusDescriptor.cpp b/Framework/Kernel/src/NexusDescriptor.cpp
index 3efd1c29011fc55152493066945571888012f503..8aa73127a37efa2ef8cf6e99321c7ae89e82a91c 100644
--- a/Framework/Kernel/src/NexusDescriptor.cpp
+++ b/Framework/Kernel/src/NexusDescriptor.cpp
@@ -1,5 +1,7 @@
 #include "MantidKernel/NexusDescriptor.h"
 
+#include <boost/algorithm/string.hpp>
+
 #include <nexus/NeXusFile.hpp>
 #include <nexus/NeXusException.hpp>
 
@@ -247,7 +249,7 @@ void NexusDescriptor::walkFile(::NeXus::File &file, const std::string &rootPath,
     const std::string &entryClass = it->second;
     const std::string entryPath =
         std::string(rootPath).append("/").append(entryName);
-    if (entryClass == "SDS") {
+    if (entryClass == "SDS" || entryClass == "ILL_data_scan_vars") {
       pmap.emplace(entryPath, entryClass);
     } else if (entryClass == "CDF0.0") {
       // Do nothing with this
diff --git a/Framework/Kernel/src/Unit.cpp b/Framework/Kernel/src/Unit.cpp
index 1e8ee389c90e51e23fa51132005a683858b00442..1146334d2f5aeaae1762e80c48143aff8f1cebde 100644
--- a/Framework/Kernel/src/Unit.cpp
+++ b/Framework/Kernel/src/Unit.cpp
@@ -583,6 +583,61 @@ double dSpacing::conversionTOFMax() const { return DBL_MAX / factorTo; }
 
 Unit *dSpacing::clone() const { return new dSpacing(*this); }
 
+// ==================================================================================================
+/* D-SPACING Perpendicular
+ * ==================================================================================================
+ *
+ * Conversion uses equation: dp^2 = lambda^2 - 2[Angstrom^2]*ln(cos(theta))
+ */
+DECLARE_UNIT(dSpacingPerpendicular)
+
+const UnitLabel dSpacingPerpendicular::label() const {
+  return Symbol::Angstrom;
+}
+
+dSpacingPerpendicular::dSpacingPerpendicular()
+    : Unit(), factorTo(DBL_MIN), factorFrom(DBL_MIN) {}
+
+void dSpacingPerpendicular::init() {
+  factorTo =
+      (PhysicalConstants::NeutronMass * (l1 + l2)) / PhysicalConstants::h;
+
+  // Now adjustments for the scale of units used
+  const double TOFisinMicroseconds = 1e6;
+  const double toAngstroms = 1e10;
+  factorTo *= TOFisinMicroseconds / toAngstroms;
+  factorFrom = factorTo;
+  if (factorFrom == 0.0)
+    factorFrom = DBL_MIN; // Protect against divide by zero
+  double cos_theta = cos(twoTheta / 2.0);
+  sfpTo = 0.0;
+  if (cos_theta > 0)
+    sfpTo = 2.0 * log(cos_theta);
+  sfpFrom = sfpTo;
+}
+
+double dSpacingPerpendicular::singleToTOF(const double x) const {
+  double sqrtarg = x * x + sfpTo;
+  // consider very small values to be a rounding error
+  if (sqrtarg < 1.0e-17)
+    return 0.0;
+  return sqrt(sqrtarg) * factorTo;
+}
+double dSpacingPerpendicular::singleFromTOF(const double tof) const {
+  double temp = tof / factorFrom;
+  return sqrt(temp * temp - sfpFrom);
+}
+double dSpacingPerpendicular::conversionTOFMin() const {
+  return sqrt(-1.0 * sfpFrom);
+}
+double dSpacingPerpendicular::conversionTOFMax() const {
+  return sqrt(std::numeric_limits<double>::max()) / factorFrom;
+}
+
+Unit *dSpacingPerpendicular::clone() const {
+  return new dSpacingPerpendicular(*this);
+}
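
Editorial restatement of the conversion implemented above: with theta = twoTheta/2 and sfp = 2 ln(cos theta) <= 0 cached in init() (the factor 2 carries units of Angstrom^2 per the header comment), the relation dp^2 = lambda^2 - 2 ln(cos theta) gives

```latex
\[
  d_\perp = \sqrt{\lambda^{2} - 2\ln\cos\theta}, \qquad
  \lambda = \frac{\mathrm{TOF}}{\mathtt{factorFrom}},
\]
\[
  \texttt{singleFromTOF}:\;
  d_\perp = \sqrt{\Bigl(\tfrac{\mathrm{TOF}}{\mathtt{factorFrom}}\Bigr)^{2} - \mathtt{sfpFrom}},
  \qquad
  \texttt{singleToTOF}:\;
  \mathrm{TOF} = \mathtt{factorTo}\,\sqrt{d_\perp^{2} + \mathtt{sfpTo}}.
\]
```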
+
 // ================================================================================
 /* MOMENTUM TRANSFER
  * ================================================================================
diff --git a/Framework/Kernel/test/NearestNeighboursTest.h b/Framework/Kernel/test/NearestNeighboursTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..53f453aa126da20eee9bceeb3bfe5a3edcf69704
--- /dev/null
+++ b/Framework/Kernel/test/NearestNeighboursTest.h
@@ -0,0 +1,65 @@
+#ifndef MANTID_KERNEL_NEARESTNEIGHBOURSTEST_H_
+#define MANTID_KERNEL_NEARESTNEIGHBOURSTEST_H_
+
+#include <cxxtest/TestSuite.h>
+#include "MantidKernel/NearestNeighbours.h"
+
+using Mantid::Kernel::NearestNeighbours;
+using namespace Eigen;
+
+class NearestNeighboursTest : public CxxTest::TestSuite {
+public:
+  NearestNeighboursTest() {}
+
+  void test_construct() {
+    std::vector<Vector3d> pts1 = {Vector3d(1, 1, 1), Vector3d(2, 2, 2)};
+    TS_ASSERT_THROWS_NOTHING(NearestNeighbours<3> nn(pts1));
+
+    std::vector<Vector2d> pts2 = {Vector2d(1, 1), Vector2d(2, 2)};
+    TS_ASSERT_THROWS_NOTHING(NearestNeighbours<2> nn(pts2));
+  }
+
+  void test_find_nearest() {
+    std::vector<Eigen::Vector3d> pts = {Vector3d(1, 1, 1), Vector3d(2, 2, 2)};
+    NearestNeighbours<3> nn(pts);
+
+    auto results = nn.findNearest(Vector3d(1, 1, 0.9));
+    TS_ASSERT_EQUALS(results.size(), 1)
+
+    Eigen::Vector3d pos = std::get<0>(results[0]);
+    auto index = std::get<1>(results[0]);
+    auto dist = std::get<2>(results[0]);
+    TS_ASSERT_EQUALS(pos[0], 1)
+    TS_ASSERT_EQUALS(pos[1], 1)
+    TS_ASSERT_EQUALS(pos[2], 1)
+    TS_ASSERT_EQUALS(index, 0)
+    TS_ASSERT_DELTA(dist, 0, 0.01)
+  }
+
+  void test_find_nearest_2() {
+    std::vector<Eigen::Vector2d> pts = {Vector2d(1, 1), Vector2d(2, 2),
+                                        Vector2d(2, 3)};
+    NearestNeighbours<2> nn(pts);
+
+    auto results = nn.findNearest(Vector2d(1, 0.9), 2);
+    TS_ASSERT_EQUALS(results.size(), 2)
+
+    Eigen::Vector2d pos = std::get<0>(results[0]);
+    auto index = std::get<1>(results[0]);
+    auto dist = std::get<2>(results[0]);
+    TS_ASSERT_EQUALS(pos[0], 1)
+    TS_ASSERT_EQUALS(pos[1], 1)
+    TS_ASSERT_EQUALS(index, 0)
+    TS_ASSERT_DELTA(dist, 0, 0.01)
+
+    pos = std::get<0>(results[1]);
+    index = std::get<1>(results[1]);
+    dist = std::get<2>(results[1]);
+    TS_ASSERT_EQUALS(pos[0], 2)
+    TS_ASSERT_EQUALS(pos[1], 2)
+    TS_ASSERT_EQUALS(index, 1)
+    TS_ASSERT_DELTA(dist, 2.21, 0.01)
+  }
+};
+
+#endif
diff --git a/Framework/Kernel/test/UnitTest.h b/Framework/Kernel/test/UnitTest.h
index 97be03c941fad28fd34c5b9fa3571603e262e478..0e6aad1d046e4e5b736adbb17b8d0e2055c16bd1 100644
--- a/Framework/Kernel/test/UnitTest.h
+++ b/Framework/Kernel/test/UnitTest.h
@@ -217,6 +217,9 @@ public:
     unit = dSpacing().clone();
     TS_ASSERT(dynamic_cast<dSpacing *>(unit));
     delete unit;
+    unit = dSpacingPerpendicular().clone();
+    TS_ASSERT(dynamic_cast<dSpacingPerpendicular *>(unit));
+    delete unit;
     unit = MomentumTransfer().clone();
     TS_ASSERT(dynamic_cast<MomentumTransfer *>(unit));
     delete unit;
@@ -592,6 +595,66 @@ public:
     }
   }
 
+  //----------------------------------------------------------------------
+  // d-SpacingPerpendicular tests
+  //----------------------------------------------------------------------
+
+  void testdSpacingPerpendicular_unitID() {
+    TS_ASSERT_EQUALS(dp.unitID(), "dSpacingPerpendicular")
+  }
+
+  void testdSpacingPerpendicular_caption() {
+    TS_ASSERT_EQUALS(dp.caption(), "d-SpacingPerpendicular")
+  }
+
+  void testdSpacingPerpendicular_label() {
+    TS_ASSERT_EQUALS(dp.label().ascii(), "Angstrom")
+    TS_ASSERT_EQUALS(dp.label().utf8(), L"\u212b")
+  }
+
+  void testdSpacingPerpendicular_cast() {
+    Unit *u = NULL;
+    TS_ASSERT_THROWS_NOTHING(u = dynamic_cast<Unit *>(&dp));
+    TS_ASSERT_EQUALS(u->unitID(), "dSpacingPerpendicular");
+  }
+
+  void testdSpacingPerpendicular_toTOF() {
+    std::vector<double> x(1, 1.0), y(1, 1.0);
+    std::vector<double> yy = y;
+    TS_ASSERT_THROWS_NOTHING(dp.toTOF(x, y, 1.0, 1.0, 1.0, 1, 1.0, 1.0))
+    TS_ASSERT_DELTA(x[0], 434.5529, 0.0001)
+    TS_ASSERT(yy == y)
+  }
+
+  void testdSpacingPerpendicular_fromTOF() {
+    std::vector<double> x(1, 1001.1), y(1, 1.0);
+    std::vector<double> yy = y;
+    TS_ASSERT_THROWS_NOTHING(dp.fromTOF(x, y, 1.0, 1.0, 1.0, 1, 1.0, 1.0))
+    TS_ASSERT_DELTA(x[0], 2.045075, 0.000001)
+    TS_ASSERT(yy == y)
+  }
+
+  void testdSpacingPerpendicularRange() {
+    std::vector<double> sample, rezult;
+
+    std::string err_mess = convert_units_check_range(dp, sample, rezult);
+    TSM_ASSERT(" ERROR:" + err_mess, err_mess.size() == 0);
+
+    for (size_t i = 0; i < sample.size(); i++) {
+      if (std::fabs(sample[i]) < 10 * FLT_EPSILON) {
+        TSM_ASSERT_DELTA(
+            "d-spacingPerpendicular limits Failed for conversion N: " +
+                boost::lexical_cast<std::string>(i),
+            sample[i], rezult[i], 10 * FLT_EPSILON);
+      } else {
+        TSM_ASSERT_DELTA(
+            "d-spacingPerpendicular limits Failed for conversion N: " +
+                boost::lexical_cast<std::string>(i),
+            rezult[i] / sample[i], 1., 10 * FLT_EPSILON);
+      }
+    }
+  }
+
   //----------------------------------------------------------------------
   // Momentum Transfer tests
   //----------------------------------------------------------------------
@@ -1334,6 +1397,7 @@ private:
   Units::Energy energy;
   Units::Energy_inWavenumber energyk;
   Units::dSpacing d;
+  Units::dSpacingPerpendicular dp;
   Units::MomentumTransfer q;
   Units::QSquared q2;
   Units::DeltaE dE;
diff --git a/Framework/MDAlgorithms/src/Quantification/SimulateResolutionConvolvedModel.cpp b/Framework/MDAlgorithms/src/Quantification/SimulateResolutionConvolvedModel.cpp
index ab4a7cc60d7f52ccf4a2dc76b816ee5912e342ce..ceb1b43b96a43edf38513cb31aaa67ab6b1d29ec 100644
--- a/Framework/MDAlgorithms/src/Quantification/SimulateResolutionConvolvedModel.cpp
+++ b/Framework/MDAlgorithms/src/Quantification/SimulateResolutionConvolvedModel.cpp
@@ -13,6 +13,7 @@
 #include "MantidMDAlgorithms/Quantification/ForegroundModelFactory.h"
 #include "MantidMDAlgorithms/Quantification/MDResolutionConvolutionFactory.h"
 #include "MantidMDAlgorithms/Quantification/ResolutionConvolvedCrossSection.h"
+#include <boost/make_shared.hpp>
 
 namespace Mantid {
 namespace MDAlgorithms {
@@ -91,15 +92,15 @@ void SimulateResolutionConvolvedModel::init() {
 void SimulateResolutionConvolvedModel::exec() {
   m_inputWS = getProperty("InputWorkspace");
   // First estimate of progress calls
-  API::Progress progress(this, 0.0, 1.0,
-                         static_cast<size_t>(m_inputWS->getNPoints()));
-  progress.report("Caching simulation input");
+  auto progress = boost::make_shared<API::Progress>(
+      this, 0.0, 1.0, static_cast<size_t>(m_inputWS->getNPoints()));
+  progress->report("Caching simulation input");
   auto resolution = createFunction();
   createDomains();
 
   // Do the real work
-  progress.setNumSteps(resolution->estimateNoProgressCalls());
-  resolution->setProgressReporter(&progress);
+  progress->setNumSteps(resolution->estimateNoProgressCalls());
+  resolution->setProgressReporter(progress);
   resolution->function(*m_domain, *m_calculatedValues);
 
   // If output workspace exists just add the events to that
diff --git a/Framework/PythonInterface/inc/MantidPythonInterface/api/FitFunctions/IFunctionAdapter.h b/Framework/PythonInterface/inc/MantidPythonInterface/api/FitFunctions/IFunctionAdapter.h
index 170c3334896d3cda61b1354d26252c26be2645f9..5a180e64328ed7d3a7ba1bb4873efd39ad6fbfc9 100644
--- a/Framework/PythonInterface/inc/MantidPythonInterface/api/FitFunctions/IFunctionAdapter.h
+++ b/Framework/PythonInterface/inc/MantidPythonInterface/api/FitFunctions/IFunctionAdapter.h
@@ -56,12 +56,14 @@ public:
   /// Returns the attribute's value as a Python object
   static PyObject *getAttributeValue(IFunction &self,
                                      const API::IFunction::Attribute &attr);
+  /// Set the attribute's value
+  static void setAttributePythonValue(IFunction &self, const std::string &name,
+                                      const boost::python::object &value);
   /// Called by the framework when an attribute has been set
   void setAttribute(const std::string &attName,
                     const API::IFunction::Attribute &attr) override;
-  /// Store the attribute's value in the default IFunction's cache
-  void storeAttributePythonValue(const std::string &name,
-                                 const boost::python::object &value);
+  /// Split this function (if needed) into a list of independent functions
+  static boost::python::object createPythonEquivalentFunctions(IFunction &self);
 
   // Each overload of declareParameter requires a different name as we
   // can't use a function pointer with a virtual base class
diff --git a/Framework/PythonInterface/mantid/api/src/Exports/IFunction.cpp b/Framework/PythonInterface/mantid/api/src/Exports/IFunction.cpp
index f592ed556176c3a30e8e470dbb89ad2aa622f295..dec138bd642ca0805d275c34a1d33a9cb70bf91c 100644
--- a/Framework/PythonInterface/mantid/api/src/Exports/IFunction.cpp
+++ b/Framework/PythonInterface/mantid/api/src/Exports/IFunction.cpp
@@ -49,6 +49,13 @@ typedef void (IFunction::*setParameterType2)(const std::string &,
                                              const double &value, bool);
 BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(setParameterType2_Overloads,
                                        setParameter, 2, 3)
+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(tie_Overloads, tie, 2, 3)
+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(addConstraints_Overloads, addConstraints,
+                                       1, 2)
+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(fixParameter_Overloads, fixParameter, 1,
+                                       2)
+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(fix_Overloads, fix, 1, 2)
+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(fixAll_Overloads, fixAll, 0, 1)
 #ifdef __clang__
 #pragma clang diagnostic pop
 #endif
@@ -122,9 +129,9 @@ void export_IFunction() {
            (arg("self"), arg("name")),
            "Return the value of the named attribute")
 
-      .def("storeAttributeValue", &IFunctionAdapter::storeAttributePythonValue,
+      .def("setAttributeValue", &IFunctionAdapter::setAttributePythonValue,
            (arg("self"), arg("name"), arg("value")),
-           "Store an attribute value in the default cache")
+           "Set a value of a named attribute")
 
       .def("declareParameter", &IFunctionAdapter::declareFitParameter,
            (arg("self"), arg("name"), arg("init_value"), arg("description")),
@@ -139,6 +146,59 @@ void export_IFunction() {
            (arg("self"), arg("name")),
            "Declare a fitting parameter settings its default value to 0.0")
 
+      .def("fixParameter", &IFunction::fix,
+           fix_Overloads((arg("self"), arg("i"), arg("isDefault")),
+                         "Fix the ith parameter"))
+
+      .def("fixParameter", &IFunction::fixParameter,
+           fixParameter_Overloads((arg("self"), arg("name"), arg("isDefault")),
+                                  "Fix the named parameter"))
+
+      .def("freeParameter", &IFunction::unfix, (arg("self"), arg("i")),
+           "Free the ith parameter")
+
+      .def("freeParameter", &IFunction::unfixParameter,
+           (arg("self"), arg("name")), "Free the named parameter")
+
+      .def("isFixed", &IFunction::isFixed, (arg("self"), arg("i")),
+           "Return whether the ith parameter is fixed or tied")
+
+      .def("fixAll", &IFunction::fixAll,
+           fixAll_Overloads((arg("self"), arg("isDefault")),
+                            "Fix all parameters"))
+
+      .def("freeAll", &IFunction::unfixAll, (arg("self")),
+           "Free all parameters")
+
+      .def("tie", &IFunction::tie,
+           tie_Overloads(
+               (arg("self"), arg("name"), arg("expr"), arg("isDefault")),
+               "Tie a named parameter to an expression"))
+
+      .def("removeTie", (bool (IFunction::*)(size_t)) & IFunction::removeTie,
+           (arg("self"), arg("i")), "Remove the tie of the ith parameter")
+
+      .def("removeTie",
+           (void (IFunction::*)(const std::string &)) & IFunction::removeTie,
+           (arg("self"), arg("name")), "Remove the tie of the named parameter")
+
+      .def("addConstraints", &IFunction::addConstraints,
+           addConstraints_Overloads(
+               (arg("self"), arg("constraints"), arg("isDefault")),
+               "Constrain named parameters"))
+
+      .def("removeConstraint", &IFunction::removeConstraint,
+           (arg("self"), arg("name")),
+           "Remove the constraint on the named parameter")
+
+      .def("getNumberDomains", &IFunction::getNumberDomains, (arg("self")),
+           "Get number of domains of a multi-domain function")
+
+      .def("createEquivalentFunctions",
+           &IFunctionAdapter::createPythonEquivalentFunctions, (arg("self")),
+           "Split this function (if needed) into a list of "
+           "independent functions")
+
       //-- Deprecated functions that have the wrong names --
       .def("categories", &getCategories, arg("self"),
            "Returns a list of the categories for an algorithm")
diff --git a/Framework/PythonInterface/mantid/api/src/FitFunctions/IFunctionAdapter.cpp b/Framework/PythonInterface/mantid/api/src/FitFunctions/IFunctionAdapter.cpp
index 508265caed4cf0b14519753be0f68153e3c61fe8..b10aa0db5ca9a52ae2d5f991055748dd12a59176 100644
--- a/Framework/PythonInterface/mantid/api/src/FitFunctions/IFunctionAdapter.cpp
+++ b/Framework/PythonInterface/mantid/api/src/FitFunctions/IFunctionAdapter.cpp
@@ -2,6 +2,7 @@
 #include "MantidPythonInterface/kernel/Environment/CallMethod.h"
 
 #include <boost/python/class.hpp>
+#include <boost/python/list.hpp>
 
 namespace Mantid {
 namespace PythonInterface {
@@ -41,9 +42,17 @@ IFunction::Attribute createAttributeFromPythonValue(const object &value) {
   else if (PyBytes_Check(rawptr) == 1) {
 #endif
     attr = IFunction::Attribute(extract<std::string>(rawptr)());
+  } else if (PyList_Check(rawptr) == 1) {
+    auto n = PyList_Size(rawptr);
+    std::vector<double> vec;
+    for (Py_ssize_t i = 0; i < n; ++i) {
+      auto v = extract<double>(PyList_GetItem(rawptr, i))();
+      vec.push_back(v);
+    }
+    attr = IFunction::Attribute(vec);
   } else
     throw std::invalid_argument(
-        "Invalid attribute type. Allowed types=float,int,str,bool");
+        "Invalid attribute type. Allowed types=float,int,str,bool,list(float)");
 
   return attr;
 }
@@ -125,12 +134,27 @@ IFunctionAdapter::getAttributeValue(IFunction &self,
     result = to_python_value<const std::string &>()(attr.asString());
   else if (type == "bool")
     result = to_python_value<const bool &>()(attr.asBool());
+  else if (type == "std::vector<double>")
+    result = to_python_value<const std::vector<double> &>()(attr.asVector());
   else
     throw std::runtime_error("Unknown attribute type, cannot convert C++ type "
                              "to Python. Contact developement team.");
   return result;
 }
 
+/**
+ * Set the attribute's value in the default IFunction's cache
+ * @param self :: A reference to a function object that has the attribute.
+ * @param name :: The name of the attribute
+ * @param value :: The value to set
+ */
+void IFunctionAdapter::setAttributePythonValue(IFunction &self,
+                                               const std::string &name,
+                                               const object &value) {
+  auto attr = createAttributeFromPythonValue(value);
+  self.setAttribute(name, attr);
+}
+
 /**
  * Calls setAttributeValue on the Python object if it exists otherwise calls the
  * base class method
@@ -148,15 +172,20 @@ void IFunctionAdapter::setAttribute(const std::string &attName,
   }
 }
 
-/**
- * Store the attribute's value in the default IFunction's cache
- * @param name :: The name of the attribute
- * @param value :: The value to store
+/** Split this function (if needed) into a list of independent functions.
+ * @param self :: A reference to a function object. If it's a multi-domain
+ *    function the result should in general contain more than 1 function.
+ *    For a single domain function it should have a single element (self).
+ * @return A python list of IFunction_sptr objects.
  */
-void IFunctionAdapter::storeAttributePythonValue(const std::string &name,
-                                                 const object &value) {
-  auto attr = createAttributeFromPythonValue(value);
-  storeAttributeValue(name, attr);
+boost::python::object
+IFunctionAdapter::createPythonEquivalentFunctions(IFunction &self) {
+  auto functions = self.createEquivalentFunctions();
+  boost::python::list list;
+  for (auto fun : functions) {
+    list.append(fun);
+  }
+  return list;
 }
 
 /**
diff --git a/Framework/PythonInterface/plugins/algorithms/AlignAndFocusPowderFromFiles.py b/Framework/PythonInterface/plugins/algorithms/AlignAndFocusPowderFromFiles.py
index 53ec2455bd3c57a2bdc9411fa58e07df00c59b3f..f31cc942d72e2ddff864d4b459d6acccd3c86b3b 100644
--- a/Framework/PythonInterface/plugins/algorithms/AlignAndFocusPowderFromFiles.py
+++ b/Framework/PythonInterface/plugins/algorithms/AlignAndFocusPowderFromFiles.py
@@ -20,6 +20,7 @@ PROPS_FOR_ALIGN = ["CalFileName", "GroupFilename", "GroupingWorkspace",
                    "CropWavelengthMin", "CropWavelengthMax",
                    "LowResSpectrumOffset", "ReductionProperties"]
 PROPS_FOR_ALIGN.extend(PROPS_FOR_INSTR)
+PROPS_FOR_PD_CHARACTER = ['FrequencyLogNames', 'WaveLengthLogNames']
 
 
 def determineChunking(filename, chunkSize):
@@ -78,6 +79,7 @@ class AlignAndFocusPowderFromFiles(DataProcessorAlgorithm):
                              'Characterizations table')
 
         self.copyProperties("AlignAndFocusPowder", PROPS_FOR_ALIGN)
+        self.copyProperties('PDDetermineCharacterizations', PROPS_FOR_PD_CHARACTER)
 
     def _getLinearizedFilenames(self, propertyName):
         runnumbers = self.getProperty(propertyName).value
@@ -101,13 +103,18 @@ class AlignAndFocusPowderFromFiles(DataProcessorAlgorithm):
         tempname = '__%s_temp' % wkspname
         Load(Filename=filename, OutputWorkspace=tempname,
              MetaDataOnly=True)
+
+        # put together argument list
+        args = dict(InputWorkspace=tempname,
+                    ReductionProperties=self.getProperty('ReductionProperties').valueAsStr)
+        for name in PROPS_FOR_PD_CHARACTER:
+            prop = self.getProperty(name)
+            if not prop.isDefault:
+                args[name] = prop.value
         if self.charac is not None:
-            PDDetermineCharacterizations(InputWorkspace=tempname,
-                                         Characterizations=self.charac,
-                                         ReductionProperties=self.getProperty('ReductionProperties').valueAsStr)
-        else:
-            PDDetermineCharacterizations(InputWorkspace=tempname,
-                                         ReductionProperties=self.getProperty('ReductionProperties').valueAsStr)
+            args['Characterizations'] = self.charac
+
+        PDDetermineCharacterizations(**args)
         DeleteWorkspace(Workspace=tempname)
 
     def __getCacheName(self, wkspname):
@@ -223,6 +230,10 @@ class AlignAndFocusPowderFromFiles(DataProcessorAlgorithm):
                 if self.kwargs['PreserveEvents']:
                     CompressEvents(InputWorkspace=finalname, OutputWorkspace=finalname)
 
+        # with more than one chunk or file the integrated proton charge is
+        # generically wrong
+        mtd[finalname].run().integrateProtonCharge()
+
         # set the output workspace
         self.setProperty('OutputWorkspace', mtd[finalname])
 
diff --git a/Framework/PythonInterface/plugins/algorithms/BASISReduction.py b/Framework/PythonInterface/plugins/algorithms/BASISReduction.py
index 3d63236e7d3544a29492961baf300512b7d435d2..28c0eca22ecd0948cc8300fec0a361f1a5767b19 100644
--- a/Framework/PythonInterface/plugins/algorithms/BASISReduction.py
+++ b/Framework/PythonInterface/plugins/algorithms/BASISReduction.py
@@ -379,7 +379,8 @@ class BASISReduction(PythonAlgorithm):
         self._sumRuns(run_set, wsName, wsName_mon, extra_extension)
         self._calibData(wsName, wsName_mon)
         if not self._debugMode:
-            sapi.DeleteWorkspace(wsName_mon)  # delete monitors
+            if not self._noMonNorm:
+                sapi.DeleteWorkspace(wsName_mon)  # delete monitors
         return wsName
 
     def _group_and_SofQW(self, wsName, etRebins, isSample=True):
diff --git a/Framework/PythonInterface/plugins/algorithms/DeltaPDF3D.py b/Framework/PythonInterface/plugins/algorithms/DeltaPDF3D.py
index 7bc2595d2bb4dffef4f26c57bac8be3e4d3c6c3d..a2a672070c19655a13a053d12d75562353d07425 100644
--- a/Framework/PythonInterface/plugins/algorithms/DeltaPDF3D.py
+++ b/Framework/PythonInterface/plugins/algorithms/DeltaPDF3D.py
@@ -34,13 +34,13 @@ class DeltaPDF3D(PythonAlgorithm):
 
         self.declareProperty("RemoveReflections", True, "Remove HKL reflections")
         condition = EnabledWhenProperty("RemoveReflections", PropertyCriterion.IsDefault)
-        self.declareProperty("Shape", "cube", doc="Shape to cut out reflections",
+        self.declareProperty("Shape", "sphere", doc="Shape to cut out reflections",
                              validator=StringListValidator(['sphere', 'cube']))
         self.setPropertySettings("Shape", condition)
         val_min_zero = FloatArrayBoundedValidator()
         val_min_zero.setLower(0.)
         self.declareProperty(FloatArrayProperty("Size", [0.2], validator=val_min_zero),
-                             "Width of cube/diameter of sphere used to remove reflections, in (HKL)")
+                             "Width of cube/diameter of sphere used to remove reflections, in (HKL) (one or three values)")
         self.setPropertySettings("Size", condition)
         self.declareProperty("SpaceGroup", "",
                              doc="Space group for reflection removal, either full name or number. If empty all HKL's will be removed.")
@@ -48,16 +48,22 @@ class DeltaPDF3D(PythonAlgorithm):
 
         self.declareProperty("CropSphere", False, "Limit min/max q values. Can help with edge effects.")
         condition = EnabledWhenProperty("CropSphere", PropertyCriterion.IsNotDefault)
-        self.declareProperty(FloatArrayProperty("SphereMin", [Property.EMPTY_DBL], validator=val_min_zero), "Min Sphere")
+        self.declareProperty(FloatArrayProperty("SphereMin", [Property.EMPTY_DBL], validator=val_min_zero),
+                             "HKL values below which will be removed (one or three values)")
         self.setPropertySettings("SphereMin", condition)
-        self.declareProperty(FloatArrayProperty("SphereMax", [Property.EMPTY_DBL], validator=val_min_zero), "Max Sphere")
+        self.declareProperty(FloatArrayProperty("SphereMax", [Property.EMPTY_DBL], validator=val_min_zero),
+                             "HKL values above which will be removed (one or three values)")
         self.setPropertySettings("SphereMax", condition)
+        self.declareProperty("FillValue", Property.EMPTY_DBL, "Value to replace with outside sphere")
+        self.setPropertySettings("FillValue", condition)
 
         self.declareProperty("Convolution", True, "Apply convolution to fill in removed reflections")
         condition = EnabledWhenProperty("Convolution", PropertyCriterion.IsDefault)
         self.declareProperty("ConvolutionWidth", 2.0, validator=FloatBoundedValidator(0.),
                              doc="Width of gaussian convolution in pixels")
         self.setPropertySettings("ConvolutionWidth", condition)
+        self.declareProperty("Deconvolution", False, "Apply deconvolution after fourier transform")
+        self.setPropertySettings("Deconvolution", condition)
 
         # Reflections
         self.setPropertyGroup("RemoveReflections","Reflection Removal")
@@ -69,10 +75,12 @@ class DeltaPDF3D(PythonAlgorithm):
         self.setPropertyGroup("CropSphere","Cropping to a sphere")
         self.setPropertyGroup("SphereMin","Cropping to a sphere")
         self.setPropertyGroup("SphereMax","Cropping to a sphere")
+        self.setPropertyGroup("FillValue","Cropping to a sphere")
 
         # Convolution
         self.setPropertyGroup("Convolution","Convolution")
         self.setPropertyGroup("ConvolutionWidth","Convolution")
+        self.setPropertyGroup("Deconvolution","Convolution")
 
     def validateInputs(self):
         issues = dict()
@@ -85,10 +93,10 @@ class DeltaPDF3D(PythonAlgorithm):
         if dimX.name != '[H,0,0]' or dimY.name != '[0,K,0]' or dimZ.name != '[0,0,L]':
             issues['InputWorkspace'] = 'dimensions must be [H,0,0], [0,K,0] and [0,0,L]'
 
-        if (dimX.getMaximum() != -dimX.getMinimum() or
-                dimY.getMaximum() != -dimY.getMinimum() or
-                dimZ.getMaximum() != -dimZ.getMinimum()):
-            issues['InputWorkspace'] = 'dimensions must be centered on zero'
+        for d in range(inWS.getNumDims()):
+            dim = inWS.getDimension(d)
+            if not np.isclose(dim.getMaximum(), -dim.getMinimum()):
+                issues['InputWorkspace'] = 'dimensions must be centered on zero'
 
         if self.getProperty("Convolution").value:
             try:
@@ -145,6 +153,10 @@ class DeltaPDF3D(PythonAlgorithm):
         Y=np.linspace(Ymin,Ymax,Ybins+1)
         Z=np.linspace(Zmin,Zmax,Zbins+1)
 
+        X, Y, Z = np.ogrid[(dimX.getX(0)+dimX.getX(1))/2:(dimX.getX(Xbins)+dimX.getX(Xbins-1))/2:Xbins*1j,
+                           (dimY.getX(0)+dimY.getX(1))/2:(dimY.getX(Ybins)+dimY.getX(Ybins-1))/2:Ybins*1j,
+                           (dimZ.getX(0)+dimZ.getX(1))/2:(dimZ.getX(Zbins)+dimZ.getX(Zbins-1))/2:Zbins*1j]
+
         if self.getProperty("RemoveReflections").value:
             progress.report("Removing Reflections")
             size = self.getProperty("Size").value
@@ -173,33 +185,39 @@ class DeltaPDF3D(PythonAlgorithm):
                                        int((k-size[1]-Ymin)/Ywidth+1):int((k+size[1]-Ymin)/Ywidth),
                                        int((l-size[2]-Zmin)/Zwidth+1):int((l+size[2]-Zmin)/Zwidth)]=np.nan
             else:  # sphere
-                Xst = ((X[:-1]+X[1:])/2).reshape((Xbins, 1, 1))
-                Yst = ((Y[:-1]+Y[1:])/2).reshape((1, Ybins, 1))
-                Zst = ((Z[:-1]+Z[1:])/2).reshape((1, 1, Zbins))
+                mask=((X-np.round(X))**2/size[0]**2 + (Y-np.round(Y))**2/size[1]**2 + (Z-np.round(Z))**2/size[2]**2 < 1)
 
-                for h in range(int(np.ceil(Xmin)), int(Xmax)+1):
-                    for k in range(int(np.ceil(Ymin)), int(Ymax)+1):
-                        for l in range(int(np.ceil(Zmin)), int(Zmax)+1):
-                            if not check_space_group or sg.isAllowedReflection([h,k,l]):
-                                signal[(Xst-h)**2/size[0]**2 + (Yst-k)**2/size[1]**2 + (Zst-l)**2/size[2]**2 < 1]=np.nan
+                # Unmask invalid reflections
+                if check_space_group:
+                    for h in range(int(np.ceil(Xmin)), int(Xmax)+1):
+                        for k in range(int(np.ceil(Ymin)), int(Ymax)+1):
+                            for l in range(int(np.ceil(Zmin)), int(Zmax)+1):
+                                if not sg.isAllowedReflection([h,k,l]):
+                                    mask[int((h-0.5-Xmin)/Xwidth+1):int((h+0.5-Xmin)/Xwidth),
+                                         int((k-0.5-Ymin)/Ywidth+1):int((k+0.5-Ymin)/Ywidth),
+                                         int((l-0.5-Zmin)/Zwidth+1):int((l+0.5-Zmin)/Zwidth)]=False
+
+                signal[mask]=np.nan
 
         if self.getProperty("CropSphere").value:
             progress.report("Cropping to sphere")
             sphereMin = self.getProperty("SphereMin").value
 
-            Xs, Ys, Zs = np.mgrid[(X[0]+X[1])/2:(X[-1]+X[-2])/2:Xbins*1j,
-                                  (Y[0]+Y[1])/2:(Y[-1]+Y[-2])/2:Ybins*1j,
-                                  (Z[0]+Z[1])/2:(Z[-1]+Z[-2])/2:Zbins*1j]
-
             if sphereMin[0] < Property.EMPTY_DBL:
                 if len(sphereMin)==1:
                     sphereMin = np.repeat(sphereMin, 3)
-                signal[Xs**2/sphereMin[0]**2 + Ys**2/sphereMin[1]**2 + Zs**2/sphereMin[2]**2 < 1]=np.nan
+                signal[X**2/sphereMin[0]**2 + Y**2/sphereMin[1]**2 + Z**2/sphereMin[2]**2 < 1]=np.nan
+
             sphereMax = self.getProperty("SphereMax").value
+
             if sphereMax[0] < Property.EMPTY_DBL:
                 if len(sphereMax)==1:
                     sphereMax = np.repeat(sphereMax, 3)
-                signal[Xs**2/sphereMax[0]**2 + Ys**2/sphereMax[1]**2 + Zs**2/sphereMax[2]**2 > 1]=np.nan
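+                # Points outside SphereMax are set to FillValue (NaN when the property is left empty)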
+                if self.getProperty("FillValue").value == Property.EMPTY_DBL:
+                    fill_value = np.nan
+                else:
+                    fill_value = self.getProperty("FillValue").value
+                signal[X**2/sphereMax[0]**2 + Y**2/sphereMax[1]**2 + Z**2/sphereMax[2]**2 > 1]=fill_value
 
         if self.getProperty("Convolution").value:
             progress.report("Convoluting signal")
@@ -220,11 +238,15 @@ class DeltaPDF3D(PythonAlgorithm):
         signal[np.isnan(signal)]=0
         signal[np.isinf(signal)]=0
 
-        signal=np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(signal))).real
+        signal=np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(signal)))
         number_of_bins = signal.shape
 
+        # Deconvolute by dividing out the Fourier-transformed convolution kernel
+        if self.getProperty("Convolution").value and self.getProperty("Deconvolution").value:
+            signal /= self._deconvolution(np.array(signal.shape))
+
         # CreateMDHistoWorkspace expects Fortran `column-major` ordering
-        signal = signal.flatten('F')
+        signal = signal.real.flatten('F')
 
         createWS_alg = self.createChildAlgorithm("CreateMDHistoWorkspace", enableLogging=False)
         createWS_alg.setProperty("SignalInput", signal)
@@ -247,7 +269,7 @@ class DeltaPDF3D(PythonAlgorithm):
 
     def _convolution(self, signal):
         from astropy.convolution import convolve, convolve_fft, Gaussian1DKernel
-        G1D = Gaussian1DKernel(2).array
+        G1D = Gaussian1DKernel(self.getProperty("ConvolutionWidth").value).array
         G3D = G1D * G1D.reshape((-1,1)) * G1D.reshape((-1,1,1))
         try:
             logger.debug('Trying astropy.convolution.convolve_fft for convolution')
@@ -256,6 +278,18 @@ class DeltaPDF3D(PythonAlgorithm):
             logger.debug('Using astropy.convolution.convolve for convolution')
             return convolve(signal, G3D)
 
+    def _deconvolution(self, shape):
+        from astropy.convolution import Gaussian1DKernel
+        G1D = Gaussian1DKernel(self.getProperty("ConvolutionWidth").value).array
+        G3D = G1D * G1D.reshape((-1,1)) * G1D.reshape((-1,1,1))
+        G3D_shape = np.array(G3D.shape)
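+        # Zero-pad the kernel symmetrically so it is at least as large as the signal in every dimension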
+        G3D = np.pad(G3D,pad_width=np.array([np.maximum(np.floor((shape-G3D_shape)/2),np.zeros(len(shape))),
+                                             np.maximum(np.ceil((shape-G3D_shape)/2),np.zeros(len(shape)))],
+                                            dtype=np.int).transpose(),mode='constant')
+        deconv = np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(G3D)))
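+        # Crop the transformed kernel back to the signal shape in case the padded kernel is larger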
+        iarr = (deconv.shape-shape)//2
+        return deconv[iarr[0]:shape[0]+iarr[0],iarr[1]:shape[1]+iarr[1],iarr[2]:shape[2]+iarr[2]]
+
     def _calc_new_extents(self, inWS):
         # Calculate new extents for fft space
         extents=''
diff --git a/Framework/PythonInterface/plugins/algorithms/LoadCIF.py b/Framework/PythonInterface/plugins/algorithms/LoadCIF.py
index 6854025decbe3fb80bd96caf2d3fa87e641c2002..15ad464ddf2adeba70425b0631ade117d9f52f00 100644
--- a/Framework/PythonInterface/plugins/algorithms/LoadCIF.py
+++ b/Framework/PythonInterface/plugins/algorithms/LoadCIF.py
@@ -76,8 +76,11 @@ class SpaceGroupBuilder(object):
 
     def _getCleanSpaceGroupSymbol(self, rawSpaceGroupSymbol):
         # Remove :1 and :H from the symbol. Those are not required at the moment because they are the default.
-        removalRe = re.compile(':[1H]', re.IGNORECASE)
-        return re.sub(removalRe, '', rawSpaceGroupSymbol).strip()
+        # Also substitute the 'S', 'H', 'Z' and 'R' endings used by ICSD to indicate alternative settings or origin choices
+        mappings = {':[1Hh]':'', ' S$':'', ' H$':'', ' Z$':' :2', ' R$':' :r'}
+        for k, v in mappings.items():
+            rawSpaceGroupSymbol = re.sub(k, v, rawSpaceGroupSymbol)
+        return rawSpaceGroupSymbol.strip()
 
     def _getSpaceGroupFromNumber(self, cifData):
         spaceGroupNumber = [int(cifData[x]) for x in
diff --git a/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py b/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py
index 3e0d662c0bab2582ab528674e39ad3dcd7c9713c..9aa40f7e2982ea51066690b06bb991bf0b66b99d 100644
--- a/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py
+++ b/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py
@@ -1,6 +1,7 @@
 from __future__ import (absolute_import, division, print_function)
 import mantid.simpleapi as api
 import numpy as np
+from scipy.constants import m_n, h
 import os
 import sys
 from mantid.api import PythonAlgorithm, AlgorithmFactory, WorkspaceProperty, \
@@ -22,6 +23,7 @@ class LoadDNSLegacy(PythonAlgorithm):
         """
         PythonAlgorithm.__init__(self)
         self.tolerance = 1e-2
+        self.instrument = None
 
     def category(self):
         """
@@ -44,7 +46,7 @@ class LoadDNSLegacy(PythonAlgorithm):
                              "Name of DNS experimental data file.")
 
         self.declareProperty(FileProperty("CoilCurrentsTable", "",
-                                          FileAction.Load, ['.txt']),
+                                          FileAction.OptionalLoad, ['.txt']),
                              "Name of file containing table of coil currents and polarisations.")
 
         self.declareProperty(WorkspaceProperty("OutputWorkspace",
@@ -57,12 +59,23 @@ class LoadDNSLegacy(PythonAlgorithm):
 
     def get_polarisation_table(self):
         # load polarisation table
+        poltable = []
         poltable_name = self.getPropertyValue("CoilCurrentsTable")
+        if not poltable_name:
+            # read the table from IDF
+            for p in ['x', 'y', 'z']:
+                currents = self.instrument.getStringParameter("{}_currents".format(p))[0].split(';')
+                for cur in currents:
+                    row = {'polarisation': p, 'comment': '7'}
+                    row['C_a'], row['C_b'], row['C_c'], row['C_z'] = [float(c) for c in cur.split(',')]
+                    poltable.append(row)
+            self.log().debug("Loaded polarisation table:\n" + str(poltable))
+            return poltable
         try:
-            currents = np.genfromtxt(poltable_name, names=True, dtype=None)
+            currents = np.genfromtxt(poltable_name, names=True, dtype='U2,U2,f8,f8,f8,f8')
+            self.log().debug("Coil currents are: " + str(currents))
         except ValueError as err:
             raise RuntimeError("Invalid coil currents table: " + str(err))
-        poltable = []
         colnames = currents.dtype.names
         poltable = [dict(list(zip(colnames, cur))) for cur in currents]
         self.log().debug("Loaded polarisation table:\n" + str(poltable))
@@ -98,6 +111,8 @@ class LoadDNSLegacy(PythonAlgorithm):
             message = "File " + filename + " does not contain any data!"
             self.log().error(message)
             raise RuntimeError(message)
+        # sample logs
+        logs = {"names": [], "values": [], "units": []}
 
         # load run information
         metadata = DNSdata()
@@ -108,6 +123,10 @@ class LoadDNSLegacy(PythonAlgorithm):
             self.log().error(message)
             raise RuntimeError(message)
 
+        tmp = api.LoadEmptyInstrument(InstrumentName='DNS')
+        self.instrument = tmp.getInstrument()
+        api.DeleteWorkspace(tmp)
+
         # load polarisation table and determine polarisation
         poltable = self.get_polarisation_table()
         pol = self.get_polarisation(metadata, poltable)
@@ -116,10 +135,48 @@ class LoadDNSLegacy(PythonAlgorithm):
             self.log().warning("Failed to determine polarisation for " + filename +
                                ". Values have been set to undefined.")
         ndet = 24
-        # this needed to be able to use ConvertToMD
-        dataX = np.zeros(2*ndet)
-        dataX.fill(metadata.wavelength + 0.00001)
-        dataX[::2] -= 0.000002
+        unitX="Wavelength"
+        if metadata.tof_channel_number < 2:
+            dataX = np.zeros(2*ndet)
+            dataX.fill(metadata.wavelength + 0.00001)
+            dataX[::2] -= 0.000002
+        else:
+            unitX="TOF"
+
+            # get instrument parameters
+            l1 = np.linalg.norm(self.instrument.getSample().getPos() - self.instrument.getSource().getPos())
+            self.log().notice("L1 = {} m".format(l1))
+            dt_factor = float(self.instrument.getStringParameter("channel_width_factor")[0])
+
+            # channel width
+            dt = metadata.tof_channel_width*dt_factor
+            # calculate tof1
+            velocity = h/(m_n*metadata.wavelength*1e-10)   # m/s
+            tof1 = 1e+06*l1/velocity        # microseconds
+            self.log().debug("TOF1 = {} microseconds".format(tof1))
+            self.log().debug("Delay time = {} microseconds".format(metadata.tof_delay_time))
+            # create dataX array
+            x0 = tof1 + metadata.tof_delay_time
+            dataX = np.linspace(x0, x0+metadata.tof_channel_number*dt, metadata.tof_channel_number+1)
+
+            # sample logs
+            logs["names"].extend(["channel_width", "TOF1", "delay_time", "tof_channels"])
+            logs["values"].extend([dt, tof1, metadata.tof_delay_time, metadata.tof_channel_number])
+            logs["units"].extend(["microseconds", "microseconds", "microseconds", ""])
+            if metadata.tof_elastic_channel:
+                logs["names"].append("EPP")
+                logs["values"].append(metadata.tof_elastic_channel)
+                logs["units"].append("")
+            if metadata.chopper_rotation_speed:
+                logs["names"].append("chopper_speed")
+                logs["values"].append(metadata.chopper_rotation_speed)
+                logs["units"].append("Hz")
+            if metadata.chopper_slits:
+                logs["names"].append("chopper_slits")
+                logs["values"].append(metadata.chopper_slits)
+                logs["units"].append("")
+
         # data normalization
         factor = 1.0
         yunit = "Counts"
@@ -141,7 +198,7 @@ class LoadDNSLegacy(PythonAlgorithm):
         dataE = np.sqrt(data_array[0:ndet, 1:])/factor
         # create workspace
         api.CreateWorkspace(OutputWorkspace=outws_name, DataX=dataX, DataY=dataY,
-                            DataE=dataE, NSpec=ndet, UnitX="Wavelength")
+                            DataE=dataE, NSpec=ndet, UnitX=unitX)
         outws = api.AnalysisDataService.retrieve(outws_name)
         api.LoadInstrument(outws, InstrumentName='DNS', RewriteSpectraMap=True)
 
@@ -156,68 +213,40 @@ class LoadDNSLegacy(PythonAlgorithm):
         # rotate the detector bank to the proper position
         api.RotateInstrumentComponent(outws, "bank0", X=0, Y=1, Z=0, Angle=metadata.deterota)
         # add sample log Ei and wavelength
-        api.AddSampleLog(outws, LogName='Ei', LogText=str(metadata.incident_energy),
-                         LogType='Number', LogUnit='meV')
-        api.AddSampleLog(outws, LogName='wavelength', LogText=str(metadata.wavelength),
-                         LogType='Number', LogUnit='Angstrom')
+        logs["names"].extend(["Ei", "wavelength"])
+        logs["values"].extend([metadata.incident_energy, metadata.wavelength])
+        logs["units"].extend(["meV", "Angstrom"])
+
         # add other sample logs
-        api.AddSampleLog(outws, LogName='deterota', LogText=str(metadata.deterota),
-                         LogType='Number', LogUnit='Degrees')
-        api.AddSampleLog(outws, 'mon_sum',
-                         LogText=str(float(metadata.monitor_counts)), LogType='Number')
-        api.AddSampleLog(outws, LogName='duration', LogText=str(metadata.duration),
-                         LogType='Number', LogUnit='Seconds')
-        api.AddSampleLog(outws, LogName='huber', LogText=str(metadata.huber),
-                         LogType='Number', LogUnit='Degrees')
-        api.AddSampleLog(outws, LogName='omega', LogText=str(metadata.huber - metadata.deterota),
-                         LogType='Number', LogUnit='Degrees')
-        api.AddSampleLog(outws, LogName='T1', LogText=str(metadata.temp1),
-                         LogType='Number', LogUnit='K')
-        api.AddSampleLog(outws, LogName='T2', LogText=str(metadata.temp2),
-                         LogType='Number', LogUnit='K')
-        api.AddSampleLog(outws, LogName='Tsp', LogText=str(metadata.tsp),
-                         LogType='Number', LogUnit='K')
-        # flipper
-        api.AddSampleLog(outws, LogName='flipper_precession',
-                         LogText=str(metadata.flipper_precession_current),
-                         LogType='Number', LogUnit='A')
-        api.AddSampleLog(outws, LogName='flipper_z_compensation',
-                         LogText=str(metadata.flipper_z_compensation_current),
-                         LogType='Number', LogUnit='A')
+        logs["names"].extend(["deterota", "mon_sum", "duration", "huber", "omega", "T1", "T2", "Tsp"])
+        logs["values"].extend([metadata.deterota, metadata.monitor_counts, metadata.duration,
+                               metadata.huber, metadata.huber - metadata.deterota,
+                               metadata.temp1, metadata.temp2, metadata.tsp])
+        logs["units"].extend(["Degrees", "Counts", "Seconds", "Degrees", "Degrees", "K", "K", "K"])
+
+        # flipper, coil currents and polarisation
         flipper_status = 'OFF'    # flipper OFF
         if abs(metadata.flipper_precession_current) > sys.float_info.epsilon:
             flipper_status = 'ON'    # flipper ON
-        api.AddSampleLog(outws, LogName='flipper',
-                         LogText=flipper_status, LogType='String')
-        # coil currents
-        api.AddSampleLog(outws, LogName='C_a', LogText=str(metadata.a_coil_current),
-                         LogType='Number', LogUnit='A')
-        api.AddSampleLog(outws, LogName='C_b', LogText=str(metadata.b_coil_current),
-                         LogType='Number', LogUnit='A')
-        api.AddSampleLog(outws, LogName='C_c', LogText=str(metadata.c_coil_current),
-                         LogType='Number', LogUnit='A')
-        api.AddSampleLog(outws, LogName='C_z', LogText=str(metadata.z_coil_current),
-                         LogType='Number', LogUnit='A')
-        # type of polarisation
-        api.AddSampleLog(outws, 'polarisation', LogText=pol[0], LogType='String')
-        api.AddSampleLog(outws, 'polarisation_comment', LogText=str(pol[1]), LogType='String')
+        logs["names"].extend(["flipper_precession", "flipper_z_compensation", "flipper",
+                              "C_a", "C_b", "C_c", "C_z", "polarisation", "polarisation_comment"])
+        logs["values"].extend([metadata.flipper_precession_current,
+                               metadata.flipper_z_compensation_current, flipper_status,
+                               metadata.a_coil_current, metadata.b_coil_current,
+                               metadata.c_coil_current, metadata.z_coil_current,
+                               str(pol[0]), str(pol[1])])
+        logs["units"].extend(["A", "A", "", "A", "A", "A", "A", "", ""])
+
         # slits
-        api.AddSampleLog(outws, LogName='slit_i_upper_blade_position',
-                         LogText=str(metadata.slit_i_upper_blade_position),
-                         LogType='Number', LogUnit='mm')
-        api.AddSampleLog(outws, LogName='slit_i_lower_blade_position',
-                         LogText=str(metadata.slit_i_lower_blade_position),
-                         LogType='Number', LogUnit='mm')
-        api.AddSampleLog(outws, LogName='slit_i_left_blade_position',
-                         LogText=str(metadata.slit_i_left_blade_position),
-                         LogType='Number', LogUnit='mm')
-        api.AddSampleLog(outws, 'slit_i_right_blade_position',
-                         LogText=str(metadata.slit_i_right_blade_position),
-                         LogType='Number', LogUnit='mm')
-        # data normalization
+        logs["names"].extend(["slit_i_upper_blade_position", "slit_i_lower_blade_position",
+                              "slit_i_left_blade_position", "slit_i_right_blade_position"])
+        logs["values"].extend([metadata.slit_i_upper_blade_position, metadata.slit_i_lower_blade_position,
+                               metadata.slit_i_left_blade_position, metadata.slit_i_right_blade_position])
+        logs["units"].extend(["mm", "mm", "mm", "mm"])
 
         # add information whether the data are normalized (duration/monitor/no):
         api.AddSampleLog(outws, LogName='normalized', LogText=norm, LogType='String')
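+        # Write all collected sample logs in a single call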
+        api.AddSampleLogMultiple(outws, LogNames=logs["names"], LogValues=logs["values"], LogUnits=logs["units"])
 
         outws.setYUnit(yunit)
         outws.setYUnitLabel(ylabel)
diff --git a/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py b/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py
index 158b3cf90d0eb86c8eb9de38f87f5d3a31ba63f7..5fd4663c3dd95db1e797a66f852095fb07f53bfe 100644
--- a/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py
+++ b/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py
@@ -228,8 +228,10 @@ class MatchPeaks(PythonAlgorithm):
         @return          :: bin numbers of the peak positions
         """
 
+        fit_table_name = input_ws.getName() + '_epp'
+
         if isinstance(input_ws, MatrixWorkspace):
-            fit_table = FindEPP(InputWorkspace=input_ws)
+            fit_table = FindEPP(InputWorkspace=input_ws, OutputWorkspace=fit_table_name)
         elif isinstance(input_ws, ITableWorkspace):
             fit_table = input_ws
         else:
@@ -277,12 +279,7 @@ class MatchPeaks(PythonAlgorithm):
 
             logger.debug('Spectrum {0} will be shifted to bin {1}'.format(i,peak_bin[i]))
 
-        # Clean-up unused TableWorkspaces in try-catch
-        # Direct deletion causes problems when running in parallel for too many workspaces
-        try:
-            DeleteWorkspace(fit_table)
-        except ValueError:
-            logger.debug('Fit table already deleted')
+        DeleteWorkspace(fit_table)
 
         return peak_bin
 
diff --git a/Framework/PythonInterface/plugins/algorithms/PDToPDFgetN.py b/Framework/PythonInterface/plugins/algorithms/PDToPDFgetN.py
index ca4030a07254d41f336300011d3d53d4db000d63..d060bfde63b3f59389550ed9926526392065a356 100644
--- a/Framework/PythonInterface/plugins/algorithms/PDToPDFgetN.py
+++ b/Framework/PythonInterface/plugins/algorithms/PDToPDFgetN.py
@@ -1,7 +1,10 @@
 #pylint: disable=no-init
 from __future__ import (absolute_import, division, print_function)
-from mantid.simpleapi import *
-from mantid.api import *
+from mantid.simpleapi import AlignAndFocusPowder, AlignAndFocusPowderFromFiles, \
+    NormaliseByCurrent, PDDetermineCharacterizations, PDLoadCharacterizations, \
+    SaveGSS, SetUncertainties
+from mantid.api import mtd, AlgorithmFactory, DataProcessorAlgorithm, FileAction, \
+    FileProperty, MatrixWorkspaceProperty, PropertyMode
 from mantid.kernel import Direction, FloatArrayProperty
 import mantid
 
@@ -28,8 +31,7 @@ class PDToPDFgetN(DataProcessorAlgorithm):
                                           defaultValue="", action=FileAction.OptionalLoad,
                                           extensions=["_event.nxs", ".nxs.h5"]),
                              "Event file")
-        self.declareProperty("MaxChunkSize", 0.0,
-                             "Specify maximum Gbytes of file to read in one chunk.  Default is whole file.")
+        self.copyProperties('AlignAndFocusPowderFromFiles', 'MaxChunkSize')
         self.declareProperty("FilterBadPulses", 95.,
                              doc="Filter out events measured while proton " +
                              "charge is more than 5% below average")
@@ -47,6 +49,7 @@ class PDToPDFgetN(DataProcessorAlgorithm):
         self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", "",
                                                      direction=Direction.Output),
                              doc="Handle to reduced workspace")
+        self.copyProperties('AlignAndFocusPowderFromFiles', 'CacheDir')
         self.declareProperty(FileProperty(name="PDFgetNFile", defaultValue="", action=FileAction.Save,
                                           extensions=[".getn"]), "Output filename")
         self.setPropertyGroup("OutputWorkspace", group)
@@ -59,13 +62,9 @@ class PDToPDFgetN(DataProcessorAlgorithm):
                                           action=FileAction.OptionalLoad,
                                           extensions=["txt"]),
                              "File with characterization runs denoted")
-
-        self.declareProperty("RemovePromptPulseWidth", 0.0,
-                             "Width of events (in microseconds) near the prompt pulse to remove. 0 disables")
-        self.declareProperty("CropWavelengthMin", 0.,
-                             "Crop the data at this minimum wavelength.")
-        self.declareProperty("CropWavelengthMax", 0.,
-                             "Crop the data at this maximum wavelength.")
+        self.copyProperties('AlignAndFocusPowderFromFiles',
+                            ['FrequencyLogNames', 'WaveLengthLogNames', 'RemovePromptPulseWidth',
+                             'CropWavelengthMin', 'CropWavelengthMax'])
 
         self.declareProperty(FloatArrayProperty("Binning", values=[0., 0., 0.],
                                                 direction=Direction.Input),
@@ -102,7 +101,7 @@ class PDToPDFgetN(DataProcessorAlgorithm):
         return issues
 
     def _loadCharacterizations(self):
-        self._focusPos = {}
+        self._alignArgs = {}
         self._iparmFile = None
 
         charFilename = self.getProperty("CharacterizationRunsFile").value
@@ -113,48 +112,55 @@ class PDToPDFgetN(DataProcessorAlgorithm):
         results = PDLoadCharacterizations(Filename=charFilename,
                                           OutputWorkspace="characterizations")
         self._iparmFile = results[1]
-        self._focusPos['PrimaryFlightPath'] = results[2]
-        self._focusPos['SpectrumIDs'] = results[3]
-        self._focusPos['L2'] = results[4]
-        self._focusPos['Polar'] = results[5]
-        self._focusPos['Azimuthal'] = results[6]
+        self._alignArgs['PrimaryFlightPath'] = results[2]
+        self._alignArgs['SpectrumIDs'] = results[3]
+        self._alignArgs['L2'] = results[4]
+        self._alignArgs['Polar'] = results[5]
+        self._alignArgs['Azimuthal'] = results[6]
 
     def PyExec(self):
         self._loadCharacterizations()
+        charac = ""
+        if mtd.doesExist("characterizations"):
+            charac = "characterizations"
+
+        # arguments for both AlignAndFocusPowder and AlignAndFocusPowderFromFiles
+        self._alignArgs['OutputWorkspace'] = self.getPropertyValue("OutputWorkspace")
+        self._alignArgs['RemovePromptPulseWidth'] = self.getProperty("RemovePromptPulseWidth").value
+        self._alignArgs['CompressTolerance'] = COMPRESS_TOL_TOF
+        self._alignArgs['PreserveEvents'] = True
+        self._alignArgs['CalFileName'] = self.getProperty("CalibrationFile").value
+        self._alignArgs['Params'] = self.getProperty("Binning").value
+        self._alignArgs['ResampleX'] = self.getProperty("ResampleX").value
+        self._alignArgs['Dspacing'] = True
+        self._alignArgs['CropWavelengthMin'] = self.getProperty('CropWavelengthMin').value
+        self._alignArgs['CropWavelengthMax'] = self.getProperty('CropWavelengthMax').value
+        self._alignArgs['ReductionProperties'] = '__snspowderreduction'
 
         wksp = self.getProperty("InputWorkspace").value
-        if wksp is None:
-            wksp = LoadEventAndCompress(Filename=self.getProperty("Filename").value,
-                                        OutputWorkspace=self.getPropertyValue("OutputWorkspace"),
-                                        MaxChunkSize=self.getProperty("MaxChunkSize").value,
-                                        FilterBadPulses=self.getProperty("FilterBadPulses").value,
-                                        CompressTOFTolerance=COMPRESS_TOL_TOF)
-            if wksp.getNumberEvents() <= 0: # checked InputWorkspace during validateInputs
-                raise RuntimeError("Workspace contains no events")
-        else:
+        if wksp is None:  # run from file with caching
+            wksp = AlignAndFocusPowderFromFiles(Filename=self.getProperty("Filename").value,
+                                                CacheDir=self.getProperty("CacheDir").value,
+                                                MaxChunkSize=self.getProperty("MaxChunkSize").value,
+                                                FilterBadPulses=self.getProperty("FilterBadPulses").value,
+                                                Characterizations=charac,
+                                                FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
+                                                WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value,
+                                                **(self._alignArgs))
+        else:  # process the input workspace
             self.log().information("Using input workspace. Ignoring properties 'Filename', " +
                                    "'OutputWorkspace', 'MaxChunkSize', and 'FilterBadPulses'")
 
-        charac = ""
-        if mtd.doesExist("characterizations"):
-            charac = "characterizations"
+            # get the correct row of the table
+            PDDetermineCharacterizations(InputWorkspace=wksp,
+                                         Characterizations=charac,
+                                         ReductionProperties="__snspowderreduction",
+                                         FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
+                                         WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value)
+
+            wksp = AlignAndFocusPowder(InputWorkspace=wksp,
+                                       **(self._alignArgs))
 
-        # get the correct row of the table
-        PDDetermineCharacterizations(InputWorkspace=wksp,
-                                     Characterizations=charac,
-                                     ReductionProperties="__snspowderreduction")
-
-        wksp = AlignAndFocusPowder(InputWorkspace=wksp, OutputWorkspace=wksp,
-                                   CalFileName=self.getProperty("CalibrationFile").value,
-                                   Params=self.getProperty("Binning").value,
-                                   ResampleX=self.getProperty("ResampleX").value, Dspacing=True,
-                                   PreserveEvents=False,
-                                   RemovePromptPulseWidth=self.getProperty("RemovePromptPulseWidth").value,
-                                   CompressTolerance=COMPRESS_TOL_TOF,
-                                   CropWavelengthMin=self.getProperty("CropWavelengthMin").value,
-                                   CropWavelengthMax=self.getProperty("CropWavelengthMax").value,
-                                   ReductionProperties="__snspowderreduction",
-                                   **(self._focusPos))
         wksp = NormaliseByCurrent(InputWorkspace=wksp, OutputWorkspace=wksp)
         wksp.getRun()['gsas_monitor'] = 1
         if self._iparmFile is not None:
diff --git a/Framework/PythonInterface/plugins/algorithms/SNSPowderReduction.py b/Framework/PythonInterface/plugins/algorithms/SNSPowderReduction.py
index 4b8f7a5f24372a386aefe2363bbef592f5d6112e..7c5f44729e2f80775a540387bce93d46175188a0 100644
--- a/Framework/PythonInterface/plugins/algorithms/SNSPowderReduction.py
+++ b/Framework/PythonInterface/plugins/algorithms/SNSPowderReduction.py
@@ -5,11 +5,11 @@ import os
 
 import mantid.simpleapi as api
 from mantid.api import mtd, AlgorithmFactory, AnalysisDataService, DataProcessorAlgorithm, \
-    FileAction, FileProperty, ITableWorkspaceProperty, MultipleFileProperty, PropertyMode, \
-    WorkspaceProperty, ITableWorkspace, MatrixWorkspace
+    FileAction, FileProperty, ITableWorkspaceProperty, PropertyMode, WorkspaceProperty, \
+    ITableWorkspace, MatrixWorkspace
 from mantid.kernel import ConfigService, Direction, FloatArrayProperty, \
     FloatBoundedValidator, IntArrayBoundedValidator, IntArrayProperty, \
-    Property, PropertyManagerDataService, StringArrayProperty, StringListValidator
+    PropertyManagerDataService, StringListValidator
 from mantid.dataobjects import SplittersWorkspace  # SplittersWorkspace
 # Use xrange in Python 2
 from six.moves import range #pylint: disable=redefined-builtin
@@ -150,11 +150,8 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         return "The algorithm used for reduction of powder diffraction data obtained on SNS instruments (e.g. PG3) "
 
     def PyInit(self):
-        self.declareProperty(MultipleFileProperty(name="Filename",
-                                                  extensions=EXTENSIONS_NXS),
-                             "Event file")
-        self.declareProperty("PreserveEvents", True,
-                             "Argument to supply to algorithms that can change from events to histograms.")
+        self.copyProperties('AlignAndFocusPowderFromFiles', ['Filename', 'PreserveEvents'])
+
         self.declareProperty("Sum", False,
                              "Sum the runs. Does nothing for characterization runs")
         self.declareProperty("PushDataPositive", "None",
@@ -179,7 +176,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                              doc="If specified overrides value in CharacterizationRunsFile. If -1 turns off correction."
                                  "")
         self.declareProperty(FileProperty(name="CalibrationFile",defaultValue="",action=FileAction.Load,
-                                          extensions=[".h5", ".hd5", ".hdf", ".cal"]))
+                                          extensions=[".h5", ".hd5", ".hdf", ".cal"]))  # CalFileName
         self.declareProperty(FileProperty(name="GroupingFile",defaultValue="",action=FileAction.OptionalLoad,
                                           extensions=[".xml"]), "Overrides grouping from CalibrationFile")
         self.declareProperty(FileProperty(name="CharacterizationRunsFile",
@@ -188,30 +185,16 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                                           extensions=["txt"]), "File with characterization runs denoted")
         self.declareProperty(FileProperty(name="ExpIniFilename", defaultValue="", action=FileAction.OptionalLoad,
                                           extensions=[".ini"]))
-        self.declareProperty("UnwrapRef", 0.,
-                             "Reference total flight path for frame unwrapping. Zero skips the correction")
-        self.declareProperty("LowResRef", 0.,
-                             "Reference DIFC for resolution removal. Zero skips the correction")
-        self.declareProperty("CropWavelengthMin", 0.,
-                             "Crop the data at this minimum wavelength. Overrides LowResRef.")
-        self.declareProperty("CropWavelengthMax", 0.,
-                             "Crop the data at this maximum wavelength. Forces use of CropWavelengthMin.")
-        self.declareProperty("RemovePromptPulseWidth", 0.0,
-                             "Width of events (in microseconds) near the prompt pulse to remove. 0 disables")
-        self.declareProperty("MaxChunkSize", 0.0,
-                             "Specify maximum Gbytes of file to read in one chunk.  Default is whole file.")
-        self.declareProperty("FilterCharacterizations", False,
-                             "Filter the characterization runs using above parameters. This only works for event files."
-                             "")
+        self.copyProperties('AlignAndFocusPowderFromFiles',
+                            ['UnwrapRef', 'LowResRef', 'CropWavelengthMin', 'CropWavelengthMax', 'RemovePromptPulseWidth',
+                             'MaxChunkSize'])
         self.declareProperty(FloatArrayProperty("Binning", values=[0., 0., 0.],
                                                 direction=Direction.Input),
-                             "Positive is linear bins, negative is logorithmic")
-        self.declareProperty("ResampleX", 0,
-                             "Number of bins in x-axis. Non-zero value overrides \"Params\" property. "
-                             "Negative value means logorithmic binning.")
+                             "Positive is linear bins, negative is logarithmic")  # Params
+        self.copyProperties('AlignAndFocusPowderFromFiles', ['ResampleX'])
         self.declareProperty("BinInDspace", True,
                              "If all three bin parameters a specified, whether they are in dspace (true) or "
-                             "time-of-flight (false)")
+                             "time-of-flight (false)")  # DSpacing
         # section of vanadium run processing
         self.declareProperty("StripVanadiumPeaks", True,
                              "Subtract fitted vanadium peaks from the known positions.")
@@ -224,7 +207,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         self.declareProperty("BackgroundSmoothParams", "", "Default=off, suggested 20,2")
 
         # filtering
-        self.declareProperty("FilterBadPulses", 95.,
+        self.declareProperty("FilterBadPulses", 95.,  # different default value
                              doc="Filter out events measured while proton charge is more than 5% below average")
         self.declareProperty("ScaleData", defaultValue=1., validator=FloatBoundedValidator(lower=0., exclusive=True),
                              doc="Constant to multiply the data before writing out. This does not apply to "
@@ -234,6 +217,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                              "'pdfgetn', and 'topas'")
         self.declareProperty("OutputFilePrefix", "", "Overrides the default filename for the output file (Optional).")
         self.declareProperty(FileProperty(name="OutputDirectory",defaultValue="",action=FileAction.Directory))
+        self.copyProperties('AlignAndFocusPowderFromFiles', 'CacheDir')
         self.declareProperty("FinalDataUnits", "dSpacing", StringListValidator(["dSpacing","MomentumTransfer"]))
 
         workspace_prop = WorkspaceProperty('SplittersWorkspace', '', Direction.Input, PropertyMode.Optional)
@@ -247,19 +231,13 @@ class SNSPowderReduction(DataProcessorAlgorithm):
 
         self.declareProperty("LowResolutionSpectraOffset", -1,
                              "If larger and equal to 0, then process low resolution TOF and offset is the spectra "
-                             "number. Otherwise, ignored.")
+                             "number. Otherwise, ignored.")  # LowResolutionSpectraOffset
 
         self.declareProperty("NormalizeByCurrent", True, "Normalize by current")
 
         self.declareProperty("CompressTOFTolerance", 0.01, "Tolerance to compress events in TOF.")
 
-        self.declareProperty(StringArrayProperty("FrequencyLogNames", ["SpeedRequest1", "Speed1", "frequency"],
-                                                 direction=Direction.Input),
-                             "Possible log names for frequency.")
-
-        self.declareProperty(StringArrayProperty("WaveLengthLogNames", ["LambdaRequest", "lambda"],
-                                                 direction=Direction.Input),
-                             "Candidate log names for wave length.")
+        self.copyProperties('AlignAndFocusPowderFromFiles', ['FrequencyLogNames', 'WaveLengthLogNames'])
 
         return
 
@@ -289,7 +267,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         self._vanPeakFWHM = self.getProperty("VanadiumFWHM").value
         self._vanSmoothing = self.getProperty("VanadiumSmoothParams").value
         self._vanRadius = self.getProperty("VanadiumRadius").value
-        calib = self.getProperty("CalibrationFile").value
+        self.calib = self.getProperty("CalibrationFile").value
         self._scaleFactor = self.getProperty("ScaleData").value
         self._outDir = self.getProperty("OutputDirectory").value
         self._outPrefix = self.getProperty("OutputFilePrefix").value.strip()
@@ -365,8 +343,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             if self._splittersWS is not None:
                 raise NotImplementedError("Summing spectra and filtering events are not supported simultaneously.")
 
-            sam_ws_name = self._focusAndSum(samRuns, sample_time_filter_wall, calib,
-                                            reload_if_loaded=reload_event_file,
+            sam_ws_name = self._focusAndSum(samRuns, reload_if_loaded=reload_event_file,
                                             preserveEvents=preserveEvents)
             assert isinstance(sam_ws_name, str), 'Returned from _focusAndSum() must be a string but not' \
                                                  '%s. ' % str(type(sam_ws_name))
@@ -379,11 +356,15 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             for sam_run_number in samRuns:
                 # first round of processing the sample
                 self._info = None
-                returned = self._focusChunks(sam_run_number, sample_time_filter_wall, calib,
-                                             splitwksp=self._splittersWS,
-                                             normalisebycurrent=self._normalisebycurrent,
-                                             reload_if_loaded=reload_event_file,
-                                             preserveEvents=preserveEvents)
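+                # No time filtering or event splitting requested: use the faster focus-and-sum path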
+                if sample_time_filter_wall[0] == 0. and sample_time_filter_wall[-1] == 0. \
+                        and self._splittersWS is None:
+                    returned = self._focusAndSum([sam_run_number], reload_if_loaded=reload_event_file,
+                                                 preserveEvents=preserveEvents)
+                else:
+                    returned = self._focusChunks(sam_run_number, sample_time_filter_wall,
+                                                 splitwksp=self._splittersWS,
+                                                 reload_if_loaded=reload_event_file,
+                                                 preserveEvents=preserveEvents)
 
                 if isinstance(returned, list):
                     # Returned with a list of workspaces
@@ -416,8 +397,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             # process the container
             can_run_numbers = self._info["container"].value
             can_run_numbers = ['%s_%d' % (self._instrument, value) for value in can_run_numbers]
-            can_run_ws_name = self._process_container_runs(can_run_numbers, sample_time_filter_wall,
-                                                           samRunIndex, calib, preserveEvents)
+            can_run_ws_name = self._process_container_runs(can_run_numbers, samRunIndex, preserveEvents)
             if can_run_ws_name is not None:
                 workspacelist.append(can_run_ws_name)
 
@@ -426,7 +406,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             van_run_number_list = ['%s_%d' % (self._instrument, value) for value in van_run_number_list]
             van_specified = not noRunSpecified(van_run_number_list)
             if van_specified:
-                van_run_ws_name = self._process_vanadium_runs(van_run_number_list, sample_time_filter_wall, samRunIndex, calib)
+                van_run_ws_name = self._process_vanadium_runs(van_run_number_list, samRunIndex)
                 workspacelist.append(van_run_ws_name)
             else:
                 van_run_ws_name = None
@@ -536,17 +516,19 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         charFilename = self.getProperty("CharacterizationRunsFile").value
         expIniFilename = self.getProperty("ExpIniFilename").value
 
+        self._charTable = ''
         if charFilename is None or len(charFilename) <= 0:
             self.iparmFile = None
             return
 
+        self._charTable = 'characterizations'
         results = api.PDLoadCharacterizations(Filename=charFilename,
                                               ExpIniFilename=expIniFilename,
-                                              OutputWorkspace="characterizations")
+                                              OutputWorkspace=self._charTable)
         # export the characterizations table
-        self._charTable = results[0]
-        self.declareProperty(ITableWorkspaceProperty("CharacterizationsTable", "characterizations", Direction.Output))
-        self.setProperty("CharacterizationsTable", self._charTable)
+        charTable = results[0]
+        self.declareProperty(ITableWorkspaceProperty("CharacterizationsTable", self._charTable, Direction.Output))
+        self.setProperty("CharacterizationsTable", charTable)
 
         # get the focus positions from the properties
         self.iparmFile = results[1]
@@ -682,7 +664,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             raise RuntimeError("Cannot add incompatible wavelengths (%f != %f)"
                                % (left["wavelength"].value, right["wavelength"].value))
 
-    def _loadAndSum(self, filename_list, outName, **filterWall):
+    def _loadAndSum(self, filename_list, outName):
         """
         Load and sum
         Purpose:
@@ -713,7 +695,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                                      OutputWorkspace=ws_name,
                                      MaxChunkSize=self._chunks,
                                      FilterBadPulses=self._filterBadPulses,
-                                     CompressTOFTolerance=self.COMPRESS_TOL_TOF, **filterWall)
+                                     CompressTOFTolerance=self.COMPRESS_TOL_TOF)
             if is_event_workspace(ws_name):
                 self.log().notice('Load event file %s, compress it and get %d events.' %
                                   (filename, get_workspace(ws_name).getNumberEvents()))
@@ -759,7 +741,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         return outName
 
     #pylint: disable=too-many-arguments
-    def _focusAndSum(self, filenames, filterWall, calib, preserveEvents=True, reload_if_loaded=True):
+    def _focusAndSum(self, filenames, preserveEvents=True, reload_if_loaded=True, final_name=None):
         """Load, sum, and focus data in chunks
         Purpose:
             Load, sum and focus data in chunks;
@@ -769,64 +751,54 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             The experimental runs are focused and summed together
         @param run_number_list:
         @param extension:
-        @param filterWall:
-        @param calib:
         @param preserveEvents:
         @return: string as the summed workspace's name
         """
-        sumRun = None
-        info = None
-
-        for filename in filenames:
-            self.log().information("[Sum] Process run number %s. " % filename)
-
-            # focus one run
-            out_ws_name = self._focusChunks(filename, filterWall, calib,
-                                            reload_if_loaded=reload_if_loaded,
-                                            normalisebycurrent=False,
-                                            preserveEvents=preserveEvents)
-            assert isinstance(out_ws_name, str), 'Output from _focusChunks() should be a string but' \
-                                                 ' not %s.' % str(type(out_ws_name))
-            assert self.does_workspace_exist(out_ws_name)
-
-            tempinfo = self._getinfo(out_ws_name)
-
-            # sum reduced runs
-            if sumRun is None:
-                # First run. No need to sumRun
-                sumRun = out_ws_name
-                info = tempinfo
-            else:
-                # Non-first run. Add this run to current summed run
-                self.checkInfoMatch(info, tempinfo)
-                # add current workspace to sub sum
-                api.Plus(LHSWorkspace=sumRun, RHSWorkspace=out_ws_name, OutputWorkspace=sumRun,
-                         ClearRHSWorkspace=allEventWorkspaces(sumRun, out_ws_name))
-                if is_event_workspace(sumRun) and self.COMPRESS_TOL_TOF > 0.:
-                    api.CompressEvents(InputWorkspace=sumRun, OutputWorkspace=sumRun,
-                                       Tolerance=self.COMPRESS_TOL_TOF)  # 10ns
-                # after adding all events, delete the current workspace.
-                api.DeleteWorkspace(out_ws_name)
-            # ENDIF
-        # ENDFOR (processing each)
+        if final_name is None:
+            final_name = getBasename(filenames[0])
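+        # Load, align, focus and sum all files in a single algorithm call, with optional disk caching via CacheDir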
+        api.AlignAndFocusPowderFromFiles(Filename=','.join(filenames),
+                                         OutputWorkspace=final_name,
+                                         MaxChunkSize=self._chunks,
+                                         FilterBadPulses=self._filterBadPulses,
+                                         Characterizations=self._charTable,
+                                         CacheDir=self.getProperty("CacheDir").value,
+                                         CalFileName=self.calib,
+                                         GroupFilename=self.getProperty("GroupingFile").value,
+                                         Params=self._binning,
+                                         ResampleX=self._resampleX,
+                                         Dspacing=self._bin_in_dspace,
+                                         PreserveEvents=preserveEvents,
+                                         RemovePromptPulseWidth=self._removePromptPulseWidth,
+                                         CompressTolerance=self.COMPRESS_TOL_TOF,
+                                         UnwrapRef=self._LRef,
+                                         LowResRef=self._DIFCref,
+                                         LowResSpectrumOffset=self._lowResTOFoffset,
+                                         CropWavelengthMin=self._wavelengthMin,
+                                         CropWavelengthMax=self._wavelengthMax,
+                                         FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
+                                         WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value,
+                                         ReductionProperties="__snspowderreduction_inner",
+                                         **self._focusPos)
+
+        # TODO: make sure this consistency check is restored after the refactoring
+        #self.checkInfoMatch(info, tempinfo)
 
         if self._normalisebycurrent is True:
-            api.NormaliseByCurrent(InputWorkspace=sumRun,
-                                   OutputWorkspace=sumRun,
+            api.NormaliseByCurrent(InputWorkspace=final_name,
+                                   OutputWorkspace=final_name,
                                    RecalculatePCharge=True)
-            get_workspace(sumRun).getRun()['gsas_monitor'] = 1
+            get_workspace(final_name).getRun()['gsas_monitor'] = 1
 
-        return sumRun
+        return final_name
 
     #pylint: disable=too-many-arguments,too-many-locals,too-many-branches
-    def _focusChunks(self, filename, filter_wall, calib,  # noqa
-                     normalisebycurrent, splitwksp=None, preserveEvents=True,
+    def _focusChunks(self, filename, filter_wall=(0.,0.),  # noqa
+                     splitwksp=None, preserveEvents=True,
                      reload_if_loaded=True):  # noqa
         """
         Load, (optional) split and focus data in chunks
         @param filename: integer for run number
         @param filter_wall:  Enabled if splitwksp is defined
-        @param calib:
         @param normalisebycurrent: Set to False if summing runs for correct math
         @param splitwksp: SplittersWorkspace (if None then no split)
         @param preserveEvents:
@@ -903,7 +875,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                 self.log().notice('Align and focus workspace %s' % out_ws_name_chunk_split)
                 api.AlignAndFocusPowder(InputWorkspace=out_ws_name_chunk_split,
                                         OutputWorkspace=out_ws_name_chunk_split,
-                                        CalFileName=calib,
+                                        CalFileName=self.calib,
                                         GroupFilename=self.getProperty("GroupingFile").value,
                                         Params=self._binning,
                                         ResampleX=self._resampleX,
@@ -987,7 +959,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                                    OutputWorkspace=output_wksp_list[split_index],
                                    Tolerance=self.COMPRESS_TOL_TOF)  # 100ns
             try:
-                if normalisebycurrent is True:
+                if self._normalisebycurrent is True:
                     api.NormaliseByCurrent(InputWorkspace=output_wksp_list[split_index],
                                            OutputWorkspace=output_wksp_list[split_index],
                                            RecalculatePCharge=True)
@@ -1028,24 +1000,14 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         assert self.does_workspace_exist(wksp_name)
 
         # Determine characterization
-        if mtd.doesExist("characterizations"):
-            # get the correct row of the table if table workspace 'charactersizations' exists
-            api.PDDetermineCharacterizations(InputWorkspace=wksp_name,
-                                             Characterizations="characterizations",
-                                             ReductionProperties="__snspowderreduction",
-                                             BackRun=self.getProperty("BackgroundNumber").value,
-                                             NormRun=self.getProperty("VanadiumNumber").value,
-                                             NormBackRun=self.getProperty("VanadiumBackgroundNumber").value,
-                                             FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
-                                             WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value)
-        else:
-            api.PDDetermineCharacterizations(InputWorkspace=wksp_name,
-                                             ReductionProperties="__snspowderreduction",
-                                             BackRun=self.getProperty("BackgroundNumber").value,
-                                             NormRun=self.getProperty("VanadiumNumber").value,
-                                             NormBackRun=self.getProperty("VanadiumBackgroundNumber").value,
-                                             FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
-                                             WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value)
+        api.PDDetermineCharacterizations(InputWorkspace=wksp_name,
+                                         Characterizations=self._charTable,
+                                         ReductionProperties="__snspowderreduction",
+                                         BackRun=self.getProperty("BackgroundNumber").value,
+                                         NormRun=self.getProperty("VanadiumNumber").value,
+                                         NormBackRun=self.getProperty("VanadiumBackgroundNumber").value,
+                                         FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
+                                         WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value)
 
         # convert the result into a dict
         return PropertyManagerDataService.retrieve("__snspowderreduction")
@@ -1268,8 +1230,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
 
         return do_split_raw_wksp, num_out_wksp
 
-    def _process_container_runs(self, can_run_numbers, timeFilterWall, samRunIndex, calib,
-                                preserveEvents):
+    def _process_container_runs(self, can_run_numbers, samRunIndex, preserveEvents):
         """ Process container runs
         :param can_run_numbers:
         :return:
@@ -1282,15 +1243,6 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         else:
             # reduce container run such that it can be removed from sample run
 
-            # set up the filters
-            if self.getProperty("FilterCharacterizations").value:
-                # use common time filter
-                canFilterWall = timeFilterWall
-            else:
-                # no time filter
-                canFilterWall = (0., 0.)
-            # END-IF
-
             if len(can_run_numbers) == 1:
                 # only 1 container run
                 can_run_number = can_run_numbers[0]
@@ -1306,19 +1258,10 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                                  OutputWorkspace=can_run_ws_name,
                                  Target="TOF")
             else:
-                # load the container run
+                fileArg = [can_run_number]
                 if self.getProperty("Sum").value:
-                    can_run_ws_name = self._focusAndSum(can_run_numbers, canFilterWall, calib,
-                                                        preserveEvents=preserveEvents)
-                else:
-                    can_run_ws_name = self._focusChunks(can_run_number, canFilterWall, calib,
-                                                        normalisebycurrent=self._normalisebycurrent,
-                                                        preserveEvents=preserveEvents)
-
-                # convert unit to TOF
-                api.ConvertUnits(InputWorkspace=can_run_ws_name,
-                                 OutputWorkspace=can_run_ws_name,
-                                 Target="TOF")
+                    fileArg = can_run_numbers
+                self._focusAndSum(fileArg, preserveEvents, final_name=can_run_ws_name)
 
                 # smooth background
                 smoothParams = self.getProperty("BackgroundSmoothParams").value
@@ -1335,7 +1278,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
 
         return can_run_ws_name
 
-    def _process_vanadium_runs(self, van_run_number_list, timeFilterWall, samRunIndex, calib, **dummy_focuspos):
+    def _process_vanadium_runs(self, van_run_number_list, samRunIndex, **dummy_focuspos):
         """
         Purpose: process vanadium runs
         Requirements: if more than 1 run in given run number list, then samRunIndex must be given.
@@ -1343,7 +1286,6 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         :param van_run_number_list: list of vanadium run
         :param timeFilterWall: time filter wall
         :param samRunIndex: sample run index
-        :param calib: calibration run
         :param focuspos:
         :return:
         """
@@ -1364,18 +1306,12 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         else:
             # Explicitly load, reduce and correct vanadium runs
 
-            # set up filter wall for van run
-            if self.getProperty("FilterCharacterizations").value:
-                vanFilterWall = {'FilterByTimeStart': timeFilterWall[0], 'FilterByTimeStop': timeFilterWall[1]}
-            else:
-                vanFilterWall = {'FilterByTimeStart': Property.EMPTY_DBL, 'FilterByTimeStop': Property.EMPTY_DBL}
-
             # load the vanadium
             van_run_ws_name = getBasename(van_run_number)
             if self.getProperty("Sum").value:
-                van_run_ws_name = self._loadAndSum(van_run_number_list, van_run_ws_name, **vanFilterWall)
+                van_run_ws_name = self._loadAndSum(van_run_number_list, van_run_ws_name)
             else:
-                van_run_ws_name = self._loadAndSum([van_run_number], van_run_ws_name, **vanFilterWall)
+                van_run_ws_name = self._loadAndSum([van_run_number], van_run_ws_name)
 
             # load the vanadium background (if appropriate)
             van_bkgd_run_number_list = self._info["vanadium_background"].value
@@ -1391,9 +1327,9 @@ class SNSPowderReduction(DataProcessorAlgorithm):
 
                 # load background runs and sum if necessary
                 if self.getProperty("Sum").value:
-                    van_bkgd_ws_name = self._loadAndSum(van_bkgd_run_number_list, van_bkgd_ws_name, **vanFilterWall)
+                    van_bkgd_ws_name = self._loadAndSum(van_bkgd_run_number_list, van_bkgd_ws_name)
                 else:
-                    van_bkgd_ws_name = self._loadAndSum([van_bkgd_run_number], van_bkgd_ws_name, **vanFilterWall)
+                    van_bkgd_ws_name = self._loadAndSum([van_bkgd_run_number], van_bkgd_ws_name)
 
                 van_bkgd_ws = get_workspace(van_bkgd_ws_name)
                 if van_bkgd_ws.id() == EVENT_WORKSPACE_ID and van_bkgd_ws.getNumberEvents() <= 0:
@@ -1437,7 +1373,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             self.log().warning('Reducing vanadium run %s.' % van_run_ws_name)
             api.AlignAndFocusPowder(InputWorkspace=van_run_ws_name,
                                     OutputWorkspace=van_run_ws_name,
-                                    CalFileName=calib,
+                                    CalFileName=self.calib,
                                     GroupFilename=self.getProperty("GroupingFile").value,
                                     Params=self._binning,
                                     ResampleX=self._resampleX,
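
Note on the refactoring above: both the container and vanadium branches now build a single
file argument and hand it to one focusing/summing helper, instead of choosing a per-case
time-filter wall. A minimal stand-alone sketch of that dispatch, with a hypothetical
focus_and_sum callable standing in for the real self._focusAndSum, is:

def reduce_runs(run_numbers, sum_runs, focus_and_sum, output_name):
    # Default to the first (usually only) run; with Sum=True pass the whole
    # list so the helper can co-add the runs in a single call.
    file_arg = [run_numbers[0]]
    if sum_runs:
        file_arg = run_numbers
    focus_and_sum(file_arg, final_name=output_name)
    return output_name

# Usage with a stand-in for the real focusing helper:
reduce_runs([1234, 1235], sum_runs=True,
            focus_and_sum=lambda files, final_name: print(files, '->', final_name),
            output_name='can_1234')
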
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesQuasi.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesQuasi.py
index ce10d94d7bf42eb6b46abc3712bd0631a6561538..cd3908232e0272f728eac9a6c53407b3edce6e3f 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesQuasi.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesQuasi.py
@@ -1,4 +1,4 @@
-#pylint: disable=invalid-name,too-many-instance-attributes,too-many-branches,no-init,redefined-builtin
+# pylint: disable=invalid-name,too-many-instance-attributes,too-many-branches,no-init,redefined-builtin
 from __future__ import (absolute_import, division, print_function)
 from six.moves import range
 from six import next
@@ -13,16 +13,16 @@ from mantid.kernel import StringListValidator, Direction
 import mantid.simpleapi as s_api
 from mantid import config, logger
 from IndirectCommon import *
+
 MTD_PLOT = import_mantidplot()
 
 if is_supported_f2py_platform():
-    QLr     = import_f2py("QLres")
-    QLd     = import_f2py("QLdata")
-    Qse     = import_f2py("QLse")
+    QLr = import_f2py("QLres")
+    QLd = import_f2py("QLdata")
+    Qse = import_f2py("QLse")
 
 
 class BayesQuasi(PythonAlgorithm):
-
     _program = None
     _samWS = None
     _resWS = None
@@ -42,16 +42,16 @@ class BayesQuasi(PythonAlgorithm):
         return "Workflow\\MIDAS"
 
     def summary(self):
-        return "This algorithm runs the Fortran QLines programs which fits a Delta function of"+\
-               " amplitude 0 and Lorentzians of amplitude A(j) and HWHM W(j) where j=1,2,3. The"+\
-               " whole function is then convoled with the resolution function."
+        return "This algorithm runs the Fortran QLines programs which fits a Delta function of" + \
+               " amplitude 0 and Lorentzians of amplitude A(j) and HWHM W(j) where j=1,2,3. The" + \
+               " whole function is then convolved with the resolution function."
 
     def version(self):
         return 1
 
     def PyInit(self):
         self.declareProperty(name='Program', defaultValue='QL',
-                             validator=StringListValidator(['QL','QSe']),
+                             validator=StringListValidator(['QL', 'QSe']),
                              doc='The type of program to run (either QL or QSe)')
 
         self.declareProperty(MatrixWorkspaceProperty('SampleWorkspace', '', direction=Direction.Input),
@@ -81,7 +81,7 @@ class BayesQuasi(PythonAlgorithm):
                              doc='Fit option for using the elastic peak')
 
         self.declareProperty(name='Background', defaultValue='Flat',
-                             validator=StringListValidator(['Sloping','Flat','Zero']),
+                             validator=StringListValidator(['Sloping', 'Flat', 'Zero']),
                              doc='Fit option for the type of background')
 
         self.declareProperty(name='FixedWidth', defaultValue=True,
@@ -131,7 +131,7 @@ class BayesQuasi(PythonAlgorithm):
         self._wfile = self.getPropertyValue('WidthFile')
         self._loop = self.getProperty('Loop').value
 
-    #pylint: disable=too-many-locals,too-many-statements
+    # pylint: disable=too-many-locals,too-many-statements
     def PyExec(self):
 
         # Check for platform support
@@ -142,18 +142,18 @@ class BayesQuasi(PythonAlgorithm):
             raise RuntimeError(unsupported_msg)
 
         from IndirectBayes import (CalcErange, GetXYE)
-        setup_prog = Progress(self, start=0.0, end=0.3, nreports = 5)
+        setup_prog = Progress(self, start=0.0, end=0.3, nreports=5)
         self.log().information('BayesQuasi input')
 
         erange = [self._e_min, self._e_max]
         nbins = [self._sam_bins, self._res_bins]
         setup_prog.report('Converting to binary for Fortran')
-        #convert true/false to 1/0 for fortran
+        # convert true/false to 1/0 for fortran
         o_el = 1 if self._elastic else 0
         o_w1 = 1 if self._width else 0
         o_res = 1 if self._res_norm else 0
 
-        #fortran code uses background choices defined using the following numbers
+        # fortran code uses background choices defined using the following numbers
         setup_prog.report('Encoding input options')
         if self._background == 'Sloping':
             o_bgd = 2
@@ -170,11 +170,11 @@ class BayesQuasi(PythonAlgorithm):
             workdir = os.getcwd()
             logger.information('Default Save directory is not set. Defaulting to current working Directory: ' + workdir)
 
-        array_len = 4096                           # length of array in Fortran
+        array_len = 4096  # length of array in Fortran
         setup_prog.report('Checking X Range')
-        CheckXrange(erange,'Energy')
+        CheckXrange(erange, 'Energy')
 
-        nbin,nrbin = nbins[0], nbins[1]
+        nbin, nrbin = nbins[0], nbins[1]
 
         logger.information('Sample is ' + self._samWS)
         logger.information('Resolution is ' + self._resWS)
@@ -195,16 +195,16 @@ class BayesQuasi(PythonAlgorithm):
         erange = [self._e_min, self._e_max]
 
         setup_prog.report('Checking Analysers')
-        CheckAnalysers(self._samWS,self._resWS)
+        CheckAnalysers(self._samWS, self._resWS)
         setup_prog.report('Obtaining EFixed, theta and Q')
         efix = getEfixed(self._samWS)
         theta, Q = GetThetaQ(self._samWS)
 
-        nsam,ntc = CheckHistZero(self._samWS)
+        nsam, ntc = CheckHistZero(self._samWS)
 
         totalNoSam = nsam
 
-        #check if we're performing a sequential fit
+        # check if we're performing a sequential fit
         if not self._loop:
             nsam = 1
 
@@ -213,54 +213,54 @@ class BayesQuasi(PythonAlgorithm):
         setup_prog.report('Checking Histograms')
         if self._program == 'QL':
             if nres == 1:
-                prog = 'QLr'                        # res file
+                prog = 'QLr'  # res file
             else:
-                prog = 'QLd'                        # data file
-                CheckHistSame(self._samWS,'Sample',self._resWS,'Resolution')
+                prog = 'QLd'  # data file
+                CheckHistSame(self._samWS, 'Sample', self._resWS, 'Resolution')
         elif self._program == 'QSe':
             if nres == 1:
-                prog = 'QSe'                        # res file
+                prog = 'QSe'  # res file
             else:
                 raise ValueError('Stretched Exp ONLY works with RES file')
 
-        logger.information('Version is ' +prog)
-        logger.information(' Number of spectra = '+str(nsam))
-        logger.information(' Erange : '+str(erange[0])+' to '+str(erange[1]))
+        logger.information('Version is ' + prog)
+        logger.information(' Number of spectra = ' + str(nsam))
+        logger.information(' Erange : ' + str(erange[0]) + ' to ' + str(erange[1]))
 
         setup_prog.report('Reading files')
-        Wy,We = self._read_width_file(self._width,self._wfile,totalNoSam)
-        dtn,xsc = self._read_norm_file(self._res_norm,self._resnormWS,totalNoSam)
+        Wy, We = self._read_width_file(self._width, self._wfile, totalNoSam)
+        dtn, xsc = self._read_norm_file(self._res_norm, self._resnormWS, totalNoSam)
 
         setup_prog.report('Establishing output workspace name')
-        fname = self._samWS[:-4] + '_'+ prog
+        fname = self._samWS[:-4] + '_' + prog
         probWS = fname + '_Prob'
         fitWS = fname + '_Fit'
-        wrks=os.path.join(workdir, self._samWS[:-4])
-        logger.information(' lptfile : '+wrks+'_'+prog+'.lpt')
-        lwrk=len(wrks)
-        wrks.ljust(140,' ')
-        wrkr=self._resWS
-        wrkr.ljust(140,' ')
+        wrks = os.path.join(workdir, self._samWS[:-4])
+        logger.information(' lptfile : ' + wrks + '_' + prog + '.lpt')
+        lwrk = len(wrks)
+        wrks.ljust(140, ' ')
+        wrkr = self._resWS
+        wrkr.ljust(140, ' ')
 
         setup_prog.report('Initialising probability list')
         # initialise probability list
         if self._program == 'QL':
             prob0, prob1, prob2 = [], [], []
         xQ = np.array([Q[0]])
-        for m in range(1,nsam):
-            xQ = np.append(xQ,Q[m])
+        for m in range(1, nsam):
+            xQ = np.append(xQ, Q[m])
         xProb = xQ
-        xProb = np.append(xProb,xQ)
-        xProb = np.append(xProb,xQ)
-        eProb = np.zeros(3*nsam)
+        xProb = np.append(xProb, xQ)
+        xProb = np.append(xProb, xQ)
+        eProb = np.zeros(3 * nsam)
 
         group = ''
-        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam*3)
-        for spectrum in range(0,nsam):
-            logger.information('Group ' +str(spectrum)+ ' at angle '+ str(theta[spectrum]))
-            nsp = spectrum+1
+        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam * 3)
+        for spectrum in range(0, nsam):
+            logger.information('Group ' + str(spectrum) + ' at angle ' + str(theta[spectrum]))
+            nsp = spectrum + 1
 
-            nout,bnorm,Xdat,Xv,Yv,Ev = CalcErange(self._samWS,spectrum,erange,nbin)
+            nout, bnorm, Xdat, Xv, Yv, Ev = CalcErange(self._samWS, spectrum, erange, nbin)
             Ndat = nout[0]
             Imin = nout[1]
             Imax = nout[2]
@@ -268,33 +268,33 @@ class BayesQuasi(PythonAlgorithm):
                 mm = spectrum
             else:
                 mm = 0
-            Nb,Xb,Yb,Eb = GetXYE(self._resWS,mm,array_len)     # get resolution data
+            Nb, Xb, Yb, Eb = GetXYE(self._resWS, mm, array_len)  # get resolution data
             numb = [nsam, nsp, ntc, Ndat, nbin, Imin, Imax, Nb, nrbin]
             rscl = 1.0
             reals = [efix, theta[spectrum], rscl, bnorm]
 
             if prog == 'QLr':
                 workflow_prog.report('Processing Sample number %i as Lorentzian' % spectrum)
-                nd,xout,yout,eout,yfit,yprob=QLr.qlres(numb,Xv,Yv,Ev,reals,fitOp,
-                                                       Xdat,Xb,Yb,Wy,We,dtn,xsc,
-                                                       wrks,wrkr,lwrk)
-                message = ' Log(prob) : '+str(yprob[0])+' '+str(yprob[1])+' '+str(yprob[2])+' '+str(yprob[3])
+                nd, xout, yout, eout, yfit, yprob = QLr.qlres(numb, Xv, Yv, Ev, reals, fitOp,
+                                                              Xdat, Xb, Yb, Wy, We, dtn, xsc,
+                                                              wrks, wrkr, lwrk)
+                message = ' Log(prob) : ' + str(yprob[0]) + ' ' + str(yprob[1]) + ' ' + str(yprob[2]) + ' ' + str(yprob[3])
                 logger.information(message)
             if prog == 'QLd':
                 workflow_prog.report('Processing Sample number %i' % spectrum)
-                nd,xout,yout,eout,yfit,yprob=QLd.qldata(numb,Xv,Yv,Ev,reals,fitOp,
-                                                        Xdat,Xb,Yb,Eb,Wy,We,
-                                                        wrks,wrkr,lwrk)
-                message = ' Log(prob) : '+str(yprob[0])+' '+str(yprob[1])+' '+str(yprob[2])+' '+str(yprob[3])
+                nd, xout, yout, eout, yfit, yprob = QLd.qldata(numb, Xv, Yv, Ev, reals, fitOp,
+                                                               Xdat, Xb, Yb, Eb, Wy, We,
+                                                               wrks, wrkr, lwrk)
+                message = ' Log(prob) : ' + str(yprob[0]) + ' ' + str(yprob[1]) + ' ' + str(yprob[2]) + ' ' + str(yprob[3])
                 logger.information(message)
             if prog == 'QSe':
                 workflow_prog.report('Processing Sample number %i as Stretched Exp' % spectrum)
-                nd,xout,yout,eout,yfit,yprob=Qse.qlstexp(numb,Xv,Yv,Ev,reals,fitOp,
-                                                         Xdat,Xb,Yb,Wy,We,dtn,xsc,
-                                                         wrks,wrkr,lwrk)
+                nd, xout, yout, eout, yfit, yprob = Qse.qlstexp(numb, Xv, Yv, Ev, reals, fitOp,
+                                                                Xdat, Xb, Yb, Wy, We, dtn, xsc,
+                                                                wrks, wrkr, lwrk)
             dataX = xout[:nd]
-            dataX = np.append(dataX,2*xout[nd-1]-xout[nd-2])
-            yfit_list = np.split(yfit[:4*nd],4)
+            dataX = np.append(dataX, 2 * xout[nd - 1] - xout[nd - 2])
+            yfit_list = np.split(yfit[:4 * nd], 4)
             dataF1 = yfit_list[1]
             if self._program == 'QL':
                 dataF2 = yfit_list[2]
@@ -303,25 +303,25 @@ class BayesQuasi(PythonAlgorithm):
             datX = dataX
             datY = yout[:nd]
             datE = eout[:nd]
-            datX = np.append(datX,dataX)
-            datY = np.append(datY,dataF1[:nd])
-            datE = np.append(datE,dataG)
+            datX = np.append(datX, dataX)
+            datY = np.append(datY, dataF1[:nd])
+            datE = np.append(datE, dataG)
             res1 = dataF1[:nd] - yout[:nd]
-            datX = np.append(datX,dataX)
-            datY = np.append(datY,res1)
-            datE = np.append(datE,dataG)
+            datX = np.append(datX, dataX)
+            datY = np.append(datY, res1)
+            datE = np.append(datE, dataG)
             nsp = 3
             names = 'data,fit.1,diff.1'
             res_plot = [0, 1, 2]
             if self._program == 'QL':
                 workflow_prog.report('Processing Lorentzian result data')
-                datX = np.append(datX,dataX)
-                datY = np.append(datY,dataF2[:nd])
-                datE = np.append(datE,dataG)
+                datX = np.append(datX, dataX)
+                datY = np.append(datY, dataF2[:nd])
+                datE = np.append(datE, dataG)
                 res2 = dataF2[:nd] - yout[:nd]
-                datX = np.append(datX,dataX)
-                datY = np.append(datY,res2)
-                datE = np.append(datE,dataG)
+                datX = np.append(datX, dataX)
+                datY = np.append(datY, res2)
+                datE = np.append(datE, dataG)
                 nsp += 2
                 names += ',fit.2,diff.2'
                 res_plot.append(4)
@@ -330,8 +330,8 @@ class BayesQuasi(PythonAlgorithm):
                 prob2.append(yprob[2])
 
             # create result workspace
-            fitWS = fname+'_Workspaces'
-            fout = fname+'_Workspace_'+ str(spectrum)
+            fitWS = fname + '_Workspaces'
+            fout = fname + '_Workspace_' + str(spectrum)
 
             workflow_prog.report('Creating OutputWorkspace')
             s_api.CreateWorkspace(OutputWorkspace=fout, DataX=datX, DataY=datY, DataE=datE,
@@ -342,29 +342,32 @@ class BayesQuasi(PythonAlgorithm):
 
         comp_prog = Progress(self, start=0.7, end=0.8, nreports=2)
         comp_prog.report('Creating Group Workspace')
-        s_api.GroupWorkspaces(InputWorkspaces=group,OutputWorkspace=fitWS)
+        s_api.GroupWorkspaces(InputWorkspaces=group, OutputWorkspace=fitWS)
 
         if self._program == 'QL':
             comp_prog.report('Processing Lorentzian probability data')
             yPr0 = np.array([prob0[0]])
             yPr1 = np.array([prob1[0]])
             yPr2 = np.array([prob2[0]])
-            for m in range(1,nsam):
-                yPr0 = np.append(yPr0,prob0[m])
-                yPr1 = np.append(yPr1,prob1[m])
-                yPr2 = np.append(yPr2,prob2[m])
+            for m in range(1, nsam):
+                yPr0 = np.append(yPr0, prob0[m])
+                yPr1 = np.append(yPr1, prob1[m])
+                yPr2 = np.append(yPr2, prob2[m])
             yProb = yPr0
-            yProb = np.append(yProb,yPr1)
-            yProb = np.append(yProb,yPr2)
+            yProb = np.append(yProb, yPr1)
+            yProb = np.append(yProb, yPr2)
             s_api.CreateWorkspace(OutputWorkspace=probWS, DataX=xProb, DataY=yProb, DataE=eProb,
                                   Nspec=3, UnitX='MomentumTransfer')
             outWS = self.C2Fw(fname)
         if self._program == 'QSe':
-            comp_prog.report('Runnning C2Se')
+            comp_prog.report('Running C2Se')
             outWS = self.C2Se(fname)
 
-        log_prog = Progress(self, start=0.8, end =1.0, nreports=8)
-        #Add some sample logs to the output workspaces
+        # Sort x axis
+        s_api.SortXAxis(InputWorkspace=outWS, OutputWorkspace=outWS, EnableLogging=False)
+
+        log_prog = Progress(self, start=0.8, end=1.0, nreports=8)
+        # Add some sample logs to the output workspaces
         log_prog.report('Copying Logs to outputWorkspace')
         s_api.CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=outWS)
         log_prog.report('Adding Sample logs to Output workspace')
@@ -373,13 +376,14 @@ class BayesQuasi(PythonAlgorithm):
         s_api.CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=fitWS)
         log_prog.report('Adding sample logs to Fit workspace')
         self._add_sample_logs(fitWS, prog, erange, nbins)
-        log_prog.report('Finialising log copying')
+        log_prog.report('Finalising log copying')
 
         self.setProperty('OutputWorkspaceFit', fitWS)
         self.setProperty('OutputWorkspaceResult', outWS)
         log_prog.report('Setting workspace properties')
 
         if self._program == 'QL':
+            s_api.SortXAxis(InputWorkspace=probWS, OutputWorkspace=probWS, EnableLogging=False)
             self.setProperty('OutputWorkspaceProb', probWS)
 
     def _add_sample_logs(self, workspace, fit_program, e_range, binning):
@@ -413,9 +417,9 @@ class BayesQuasi(PythonAlgorithm):
         log_alg.execute()
 
     def C2Se(self, sname):
-        outWS = sname+'_Result'
-        asc = self._read_ascii_file(sname+'.qse')
-        var = asc[3].split()                            #split line on spaces
+        outWS = sname + '_Result'
+        asc = self._read_ascii_file(sname + '.qse')
+        var = asc[3].split()  # split line on spaces
         nspec = var[0]
         var = ExtractInt(asc[6])
         first = 7
@@ -429,8 +433,8 @@ class BayesQuasi(PythonAlgorithm):
         dataE = np.array([])
         data = np.array([dataX, dataY, dataE])
 
-        for _ in range(0,ns):
-            first,Q,_,fw,it,be = self.SeBlock(asc,first)
+        for _ in range(0, ns):
+            first, Q, _, fw, it, be = self.SeBlock(asc, first)
             Xout.append(Q)
             Yf.append(fw[0])
             Ef.append(fw[1])
@@ -461,9 +465,9 @@ class BayesQuasi(PythonAlgorithm):
     def _add_xye_data(self, data, xout, Y, E):
 
         dX, dY, dE = data[0], data[1], data[2]
-        dX = np.append(dX,np.array(xout))
-        dY = np.append(dY,np.array(Y))
-        dE = np.append(dE,np.array(E))
+        dX = np.append(dX, np.array(xout))
+        dY = np.append(dY, np.array(Y))
+        dE = np.append(dE, np.array(E))
         data = (dX, dY, dE)
 
         return dX, dY, dE, data
@@ -478,51 +482,51 @@ class BayesQuasi(PythonAlgorithm):
                 asc.append(line)
         return asc
 
-    def SeBlock(self, a, index):                                 #read Ascii block of Integers
+    def SeBlock(self, a, index):  # read Ascii block of Integers
         index += 1
-        val = ExtractFloat(a[index])               #Q,AMAX,HWHM
+        val = ExtractFloat(a[index])  # Q,AMAX,HWHM
         Q = val[0]
         AMAX = val[1]
         HWHM = val[2]
         index += 1
-        val = ExtractFloat(a[index])               #A0
-        int0 = [AMAX*val[0]]
+        val = ExtractFloat(a[index])  # A0
+        int0 = [AMAX * val[0]]
         index += 1
-        val = ExtractFloat(a[index])                #AI,FWHM index peak
-        fw = [2.*HWHM*val[1]]
-        integer = [AMAX*val[0]]
+        val = ExtractFloat(a[index])  # AI,FWHM index peak
+        fw = [2. * HWHM * val[1]]
+        integer = [AMAX * val[0]]
         index += 1
-        val = ExtractFloat(a[index])                 #SIG0
+        val = ExtractFloat(a[index])  # SIG0
         int0.append(val[0])
         index += 1
-        val = ExtractFloat(a[index])                  #SIG3K
-        integer.append(AMAX*math.sqrt(math.fabs(val[0])+1.0e-20))
+        val = ExtractFloat(a[index])  # SIG3K
+        integer.append(AMAX * math.sqrt(math.fabs(val[0]) + 1.0e-20))
         index += 1
-        val = ExtractFloat(a[index])                  #SIG1K
-        fw.append(2.0*HWHM*math.sqrt(math.fabs(val[0])+1.0e-20))
+        val = ExtractFloat(a[index])  # SIG1K
+        fw.append(2.0 * HWHM * math.sqrt(math.fabs(val[0]) + 1.0e-20))
         index += 1
-        be = ExtractFloat(a[index])                  #EXPBET
+        be = ExtractFloat(a[index])  # EXPBET
         index += 1
-        val = ExtractFloat(a[index])                  #SIG2K
-        be.append(math.sqrt(math.fabs(val[0])+1.0e-20))
+        val = ExtractFloat(a[index])  # SIG2K
+        be.append(math.sqrt(math.fabs(val[0]) + 1.0e-20))
         index += 1
-        return index, Q, int0 ,fw , integer, be                                      #values as list
+        return index, Q, int0, fw, integer, be  # values as list
 
-    def _get_res_norm(self, resnormWS,ngrp):
-        if ngrp == 0:                                # read values from WS
-            dtnorm = s_api.mtd[resnormWS+'_Intensity'].readY(0)
-            xscale = s_api.mtd[resnormWS+'_Stretch'].readY(0)
-        else:                                        # constant values
+    def _get_res_norm(self, resnormWS, ngrp):
+        if ngrp == 0:  # read values from WS
+            dtnorm = s_api.mtd[resnormWS + '_Intensity'].readY(0)
+            xscale = s_api.mtd[resnormWS + '_Stretch'].readY(0)
+        else:  # constant values
             dtnorm = []
             xscale = []
-            for _ in range(0,ngrp):
+            for _ in range(0, ngrp):
                 dtnorm.append(1.0)
                 xscale.append(1.0)
-        dtn=PadArray(dtnorm,51)                      # pad for Fortran call
-        xsc=PadArray(xscale,51)
-        return dtn,xsc
+        dtn = PadArray(dtnorm, 51)  # pad for Fortran call
+        xsc = PadArray(xscale, 51)
+        return dtn, xsc
 
-    def _read_norm_file(self, readRes,resnormWS,nsam):            # get norm & scale values
+    def _read_norm_file(self, readRes, resnormWS, nsam):  # get norm & scale values
         resnorm_root = resnormWS
         # Obtain root of resnorm group name
         if '_Intensity' in resnormWS:
@@ -530,25 +534,25 @@ class BayesQuasi(PythonAlgorithm):
         if '_Stretch' in resnormWS:
             resnorm_root = resnormWS[:-8]
 
-        if readRes:                   # use ResNorm file option=o_res
-            Xin = s_api.mtd[resnorm_root+'_Intensity'].readX(0)
-            nrm = len(Xin)                        # no. points from length of x array
+        if readRes:  # use ResNorm file option=o_res
+            Xin = s_api.mtd[resnorm_root + '_Intensity'].readX(0)
+            nrm = len(Xin)  # no. points from length of x array
             if nrm == 0:
                 raise ValueError('ResNorm file has no Intensity points')
-            Xin = s_api.mtd[resnorm_root+'_Stretch'].readX(0)  # no. points from length of x array
+            Xin = s_api.mtd[resnorm_root + '_Stretch'].readX(0)  # no. points from length of x array
             if len(Xin) == 0:
                 raise ValueError('ResNorm file has no xscale points')
-            if nrm != nsam:                # check that no. groups are the same
-                raise ValueError('ResNorm groups (' +str(nrm) + ') not = Sample (' +str(nsam) +')')
+            if nrm != nsam:  # check that no. groups are the same
+                raise ValueError('ResNorm groups (' + str(nrm) + ') not = Sample (' + str(nsam) + ')')
             else:
-                dtn,xsc = self._get_res_norm(resnorm_root,0)
+                dtn, xsc = self._get_res_norm(resnorm_root, 0)
         else:
             # do not use ResNorm file
-            dtn,xsc = self._get_res_norm(resnorm_root,nsam)
-        return dtn,xsc
+            dtn, xsc = self._get_res_norm(resnorm_root, nsam)
+        return dtn, xsc
 
-    #Reads in a width ASCII file
-    def _read_width_file(self, readWidth,widthFile,numSampleGroups):
+    # Reads in a width ASCII file
+    def _read_width_file(self, readWidth, widthFile, numSampleGroups):
         widthY, widthE = [], []
         if readWidth:
             logger.information('Width file is ' + widthFile)
@@ -566,72 +570,72 @@ class BayesQuasi(PythonAlgorithm):
             numLines = len(asc)
             if numLines == 0:
                 raise ValueError('No groups in width file')
-            if numLines != numSampleGroups:                # check that no. groups are the same
-                raise ValueError('Width groups (' +str(numLines) + ') not = Sample (' +str(numSampleGroups) +')')
+            if numLines != numSampleGroups:  # check that no. groups are the same
+                raise ValueError('Width groups (' + str(numLines) + ') not = Sample (' + str(numSampleGroups) + ')')
         else:
             # no file: just use constant values
             widthY = np.zeros(numSampleGroups)
             widthE = np.zeros(numSampleGroups)
         # pad for Fortran call
-        widthY = PadArray(widthY,51)
-        widthE = PadArray(widthE,51)
+        widthY = PadArray(widthY, 51)
+        widthE = PadArray(widthE, 51)
 
         return widthY, widthE
 
     def C2Fw(self, sname):
-        output_workspace = sname+'_Result'
+        output_workspace = sname + '_Result'
         num_spectra = 0
         axis_names = []
         x, y, e = [], [], []
-        for nl in range(1,4):
-            num_params = nl*3+1
+        for nl in range(1, 4):
+            num_params = nl * 3 + 1
             num_spectra += num_params
 
             amplitude_data, width_data = [], []
-            amplitude_error, width_error  = [], []
+            amplitude_error, width_error = [], []
 
-            #read data from file output by fortran code
-            file_name = sname + '.ql' +str(nl)
+            # read data from file output by fortran code
+            file_name = sname + '.ql' + str(nl)
             x_data, peak_data, peak_error = self._read_ql_file(file_name, nl)
             x_data = np.asarray(x_data)
 
             amplitude_data, width_data, height_data = peak_data
             amplitude_error, width_error, height_error = peak_error
 
-            #transpose y and e data into workspace rows
+            # transpose y and e data into workspace rows
             amplitude_data, width_data = np.asarray(amplitude_data).T, np.asarray(width_data).T
             amplitude_error, width_error = np.asarray(amplitude_error).T, np.asarray(width_error).T
             height_data, height_error = np.asarray(height_data), np.asarray(height_error)
 
-            #calculate EISF and EISF error
-            total = height_data+amplitude_data
+            # calculate EISF and EISF error
+            total = height_data + amplitude_data
             EISF_data = height_data / total
-            total_error = height_error**2 + amplitude_error**2
-            EISF_error = EISF_data * np.sqrt((height_error**2/height_data**2) + (total_error/total**2))
+            total_error = height_error ** 2 + amplitude_error ** 2
+            EISF_error = EISF_data * np.sqrt((height_error ** 2 / height_data ** 2) + (total_error / total ** 2))
 
-            #interlace amplitudes and widths of the peaks
+            # interlace amplitudes and widths of the peaks
             y.append(np.asarray(height_data))
             for amp, width, EISF in zip(amplitude_data, width_data, EISF_data):
                 y.append(amp)
                 y.append(width)
                 y.append(EISF)
 
-            #iterlace amplitude and width errors of the peaks
+            # interlace amplitude and width errors of the peaks
             e.append(np.asarray(height_error))
             for amp, width, EISF in zip(amplitude_error, width_error, EISF_error):
                 e.append(amp)
                 e.append(width)
                 e.append(EISF)
 
-            #create x data and axis names for each function
-            axis_names.append('f'+str(nl)+'.f0.'+'Height')
+            # create x data and axis names for each function
+            axis_names.append('f' + str(nl) + '.f0.' + 'Height')
             x.append(x_data)
-            for j in range(1,nl+1):
-                axis_names.append('f'+str(nl)+'.f'+str(j)+'.Amplitude')
+            for j in range(1, nl + 1):
+                axis_names.append('f' + str(nl) + '.f' + str(j) + '.Amplitude')
                 x.append(x_data)
-                axis_names.append('f'+str(nl)+'.f'+str(j)+'.FWHM')
+                axis_names.append('f' + str(nl) + '.f' + str(j) + '.FWHM')
                 x.append(x_data)
-                axis_names.append('f'+str(nl)+'.f'+str(j)+'.EISF')
+                axis_names.append('f' + str(nl) + '.f' + str(j) + '.EISF')
                 x.append(x_data)
 
         x = np.asarray(x).flatten()
@@ -644,83 +648,84 @@ class BayesQuasi(PythonAlgorithm):
         return output_workspace
 
     def _yield_floats(self, block):
-        #yield a list of floats from a list of lines of text
-        #encapsulates the iteration over a block of lines
+        # yield a list of floats from a list of lines of text
+        # encapsulates the iteration over a block of lines
         for line in block:
             yield ExtractFloat(line)
 
     def _read_ql_file(self, file_name, nl):
-        #offet to ignore header
+        # offset to ignore header
         header_offset = 8
-        block_size = 4+nl*3
+        block_size = 4 + nl * 3
 
         asc = self._read_ascii_file(file_name)
-        #extract number of blocks from the file header
+        # extract number of blocks from the file header
         num_blocks = int(ExtractFloat(asc[3])[0])
 
         q_data = []
         amp_data, FWHM_data, height_data = [], [], []
         amp_error, FWHM_error, height_error = [], [], []
 
-        #iterate over each block of fit parameters in the file
-        #each block corresponds to a single column in the final workspace
+        # iterate over each block of fit parameters in the file
+        # each block corresponds to a single column in the final workspace
         for block_num in range(num_blocks):
-            lower_index = header_offset+(block_size*block_num)
-            upper_index = lower_index+block_size
+            lower_index = header_offset + (block_size * block_num)
+            upper_index = lower_index + block_size
 
-            #create iterator for each line in the block
+            # create iterator for each line in the block
             line_pointer = self._yield_floats(asc[lower_index:upper_index])
 
-            #Q,AMAX,HWHM,BSCL,GSCL
+            # Q,AMAX,HWHM,BSCL,GSCL
             line = next(line_pointer)
             Q, AMAX, HWHM, _, _ = line
             q_data.append(Q)
 
-            #A0,A1,A2,A4
+            # A0,A1,A2,A4
             line = next(line_pointer)
-            block_height = AMAX*line[0]
+            block_height = AMAX * line[0]
 
-            #parse peak data from block
+            # parse peak data from block
             block_FWHM = []
             block_amplitude = []
             for _ in range(nl):
-                #Amplitude,FWHM for each peak
+                # Amplitude,FWHM for each peak
                 line = next(line_pointer)
-                amp = AMAX*line[0]
-                FWHM = 2.*HWHM*line[1]
+                amp = AMAX * line[0]
+                FWHM = 2. * HWHM * line[1]
                 block_amplitude.append(amp)
                 block_FWHM.append(FWHM)
 
-            #next parse error data from block
-            #SIG0
+            # next parse error data from block
+            # SIG0
             line = next(line_pointer)
             block_height_e = line[0]
 
             block_FWHM_e = []
             block_amplitude_e = []
             for _ in range(nl):
-                #Amplitude error,FWHM error for each peak
-                #SIGIK
+                # Amplitude error,FWHM error for each peak
+                # SIGIK
                 line = next(line_pointer)
-                amp = AMAX*math.sqrt(math.fabs(line[0])+1.0e-20)
+                amp = AMAX * math.sqrt(math.fabs(line[0]) + 1.0e-20)
                 block_amplitude_e.append(amp)
 
-                #SIGFK
+                # SIGFK
                 line = next(line_pointer)
-                FWHM = 2.0*HWHM*math.sqrt(math.fabs(line[0])+1.0e-20)
+                FWHM = 2.0 * HWHM * math.sqrt(math.fabs(line[0]) + 1.0e-20)
                 block_FWHM_e.append(FWHM)
 
-            #append data from block
+            # append data from block
             amp_data.append(block_amplitude)
             FWHM_data.append(block_FWHM)
             height_data.append(block_height)
 
-            #append error values from block
+            # append error values from block
             amp_error.append(block_amplitude_e)
             FWHM_error.append(block_FWHM_e)
             height_error.append(block_height_e)
 
         return q_data, (amp_data, FWHM_data, height_data), (amp_error, FWHM_error, height_error)
 
+
 # Register algorithm with Mantid
 AlgorithmFactory.subscribe(BayesQuasi)
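
For reference, the EISF error propagation reformatted in C2Fw above can be exercised on its
own; the sketch below reproduces the same NumPy expressions on small dummy arrays (the real
inputs come from the Fortran .ql output files):

import numpy as np

height, amplitude = np.array([2.0, 3.0]), np.array([1.0, 1.5])
height_err, amplitude_err = np.array([0.1, 0.2]), np.array([0.05, 0.1])

total = height + amplitude                      # elastic + quasielastic intensity
eisf = height / total                           # EISF = elastic fraction
total_err = height_err ** 2 + amplitude_err ** 2
eisf_err = eisf * np.sqrt(height_err ** 2 / height ** 2 + total_err / total ** 2)
print(eisf, eisf_err)
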
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesStretch.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesStretch.py
index f173b1db8998799ad83fc6d0fce6eae18be5a831..b13b0565765d903b0ca85052cbdee06c229d5145 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesStretch.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesStretch.py
@@ -1,4 +1,4 @@
-#pylint: disable=invalid-name,too-many-instance-attributes,too-many-branches,no-init
+# pylint: disable=invalid-name,too-many-instance-attributes,too-many-branches,no-init
 from __future__ import (absolute_import, division, print_function)
 from IndirectImport import *
 
@@ -11,11 +11,10 @@ import os
 import numpy as np
 
 if is_supported_f2py_platform():
-    Que     = import_f2py("Quest")
+    Que = import_f2py("Quest")
 
 
 class BayesStretch(PythonAlgorithm):
-
     _sam_name = None
     _sam_ws = None
     _res_name = None
@@ -59,7 +58,7 @@ class BayesStretch(PythonAlgorithm):
                              doc='Fit option for using the elastic peak')
 
         self.declareProperty(name='Background', defaultValue='Flat',
-                             validator=StringListValidator(['Sloping','Flat','Zero']),
+                             validator=StringListValidator(['Sloping', 'Flat', 'Zero']),
                              doc='Fit option for the type of background')
 
         self.declareProperty(name='NumberSigma', defaultValue=50,
@@ -96,13 +95,13 @@ class BayesStretch(PythonAlgorithm):
 
         return issues
 
-    #pylint: disable=too-many-locals
+    # pylint: disable=too-many-locals
     def PyExec(self):
         run_f2py_compatibility_test()
 
         from IndirectBayes import (CalcErange, GetXYE)
         from IndirectCommon import (CheckXrange, CheckAnalysers, getEfixed, GetThetaQ, CheckHistZero)
-        setup_prog = Progress(self, start=0.0, end=0.3, nreports = 5)
+        setup_prog = Progress(self, start=0.0, end=0.3, nreports=5)
         logger.information('BayesStretch input')
         logger.information('Sample is %s' % self._sam_name)
         logger.information('Resolution is %s' % self._res_name)
@@ -123,9 +122,9 @@ class BayesStretch(PythonAlgorithm):
         theta, Q = GetThetaQ(self._sam_name)
 
         setup_prog.report('Checking Histograms')
-        nsam,ntc = CheckHistZero(self._sam_name)
+        nsam, ntc = CheckHistZero(self._sam_name)
 
-        #check if we're performing a sequential fit
+        # check if we're performing a sequential fit
         if not self._loop:
             nsam = 1
 
@@ -135,22 +134,22 @@ class BayesStretch(PythonAlgorithm):
 
         setup_prog.report('Creating FORTRAN Input')
         fname = self._sam_name[:-4] + '_Stretch'
-        wrks=os.path.join(workdir, self._sam_name[:-4])
+        wrks = os.path.join(workdir, self._sam_name[:-4])
         logger.information('lptfile : %s_Qst.lpt' % wrks)
-        lwrk=len(wrks)
+        lwrk = len(wrks)
         wrks.ljust(140, ' ')
-        wrkr=self._res_name
+        wrkr = self._res_name
         wrkr.ljust(140, ' ')
-        eBet0 = np.zeros(self._nbet)                  # set errors to zero
-        eSig0 = np.zeros(self._nsig)                  # set errors to zero
+        eBet0 = np.zeros(self._nbet)  # set errors to zero
+        eSig0 = np.zeros(self._nsig)  # set errors to zero
         rscl = 1.0
         Qaxis = ''
 
-        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam*3)
+        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam * 3)
 
         # Empty arrays to hold Sigma and Bet x,y,e values
-        xSig, ySig, eSig = [],[],[]
-        xBet, yBet, eBet = [],[],[]
+        xSig, ySig, eSig = [], [], []
+        xBet, yBet, eBet = [], [], []
 
         for m in range(nsam):
             logger.information('Group %i at angle %f' % (m, theta[m]))
@@ -168,13 +167,13 @@ class BayesStretch(PythonAlgorithm):
             reals = [efix, theta[m], rscl, bnorm]
 
             workflow_prog.report('Processing spectrum number %i' % m)
-            xsout, ysout, xbout, ybout, zpout=Que.quest(numb, Xv, Yv, Ev, reals, fitOp,
-                                                        Xdat, Xb, Yb, wrks, wrkr, lwrk)
-            dataXs = xsout[:self._nsig]               # reduce from fixed FORTRAN array
+            xsout, ysout, xbout, ybout, zpout = Que.quest(numb, Xv, Yv, Ev, reals, fitOp,
+                                                          Xdat, Xb, Yb, wrks, wrkr, lwrk)
+            dataXs = xsout[:self._nsig]  # reduce from fixed FORTRAN array
             dataYs = ysout[:self._nsig]
             dataXb = xbout[:self._nbet]
             dataYb = ybout[:self._nbet]
-            zpWS = fname + '_Zp' +str(m)
+            zpWS = fname + '_Zp' + str(m)
             if m > 0:
                 Qaxis += ','
             Qaxis += str(Q[m])
@@ -184,7 +183,7 @@ class BayesStretch(PythonAlgorithm):
             dataEz = []
 
             for n in range(self._nsig):
-                yfit_list = np.split(zpout[:self._nsig*self._nbet], self._nsig)
+                yfit_list = np.split(zpout[:self._nsig * self._nbet], self._nsig)
                 dataYzp = yfit_list[n]
 
                 dataXz = np.append(dataXz, xbout[:self._nbet])
@@ -194,19 +193,19 @@ class BayesStretch(PythonAlgorithm):
             zpWS = fname + '_Zp' + str(m)
             self._create_workspace(zpWS, [dataXz, dataYz, dataEz], self._nsig, dataXs, True)
 
-            xSig = np.append(xSig,dataXs)
-            ySig = np.append(ySig,dataYs)
-            eSig = np.append(eSig,eSig0)
-            xBet = np.append(xBet,dataXb)
-            yBet = np.append(yBet,dataYb)
-            eBet = np.append(eBet,eBet0)
+            xSig = np.append(xSig, dataXs)
+            ySig = np.append(ySig, dataYs)
+            eSig = np.append(eSig, eSig0)
+            xBet = np.append(xBet, dataXb)
+            yBet = np.append(yBet, dataYb)
+            eBet = np.append(eBet, eBet0)
 
             if m == 0:
                 groupZ = zpWS
             else:
-                groupZ = groupZ +','+ zpWS
+                groupZ = groupZ + ',' + zpWS
 
-        #create workspaces for sigma and beta
+        # create workspaces for sigma and beta
         workflow_prog.report('Creating OutputWorkspace')
         self._create_workspace(fname + '_Sigma', [xSig, ySig, eSig], nsam, Qaxis)
         self._create_workspace(fname + '_Beta', [xBet, yBet, eBet], nsam, Qaxis)
@@ -219,31 +218,35 @@ class BayesStretch(PythonAlgorithm):
         s_api.GroupWorkspaces(InputWorkspaces=groupZ,
                               OutputWorkspace=contour_ws)
 
-        #Add some sample logs to the output workspaces
-        log_prog = Progress(self, start=0.8, end =1.0, nreports=6)
+        # Add some sample logs to the output workspaces
+        log_prog = Progress(self, start=0.8, end=1.0, nreports=6)
         log_prog.report('Copying Logs to Fit workspace')
         copy_log_alg = self.createChildAlgorithm('CopyLogs', enableLogging=False)
         copy_log_alg.setProperty('InputWorkspace', self._sam_name)
-        copy_log_alg.setProperty('OutputWorkspace',fit_ws)
+        copy_log_alg.setProperty('OutputWorkspace', fit_ws)
         copy_log_alg.execute()
 
         log_prog.report('Adding Sample logs to Fit workspace')
         self._add_sample_logs(fit_ws, self._erange, self._nbins[0])
 
         log_prog.report('Copying logs to Contour workspace')
-        copy_log_alg.setProperty('InputWorkspace',self._sam_name)
-        copy_log_alg.setProperty('OutputWorkspace',contour_ws)
+        copy_log_alg.setProperty('InputWorkspace', self._sam_name)
+        copy_log_alg.setProperty('OutputWorkspace', contour_ws)
         copy_log_alg.execute()
 
         log_prog.report('Adding sample logs to Contour workspace')
         self._add_sample_logs(contour_ws, self._erange, self._nbins[0])
         log_prog.report('Finialising log copying')
 
+        # sort x axis
+        s_api.SortXAxis(InputWorkspace=fit_ws, OutputWorkspace=fit_ws, EnableLogging=False)
+        s_api.SortXAxis(InputWorkspace=contour_ws, OutputWorkspace=contour_ws, EnableLogging=False)
+
         self.setProperty('OutputWorkspaceFit', fit_ws)
         self.setProperty('OutputWorkspaceContour', contour_ws)
         log_prog.report('Setting workspace properties')
 
-#----------------------------- Helper functions -----------------------------
+    # ----------------------------- Helper functions -----------------------------
 
     def _encode_fit_ops(self, elastic, background):
         """
@@ -274,8 +277,8 @@ class BayesStretch(PythonAlgorithm):
             logger.information('Defaulting to current working Directory: ' + workdir)
         return workdir
 
-    #pylint: disable=too-many-arguments
-    def _create_workspace(self, name, xye, num_spec, vert_axis, is_zp_ws = False):
+    # pylint: disable=too-many-arguments
+    def _create_workspace(self, name, xye, num_spec, vert_axis, is_zp_ws=False):
         """
         Creates a workspace from FORTRAN data
 
@@ -299,11 +302,11 @@ class BayesStretch(PythonAlgorithm):
         unitx = ws.getAxis(0).setUnit("Label")
         if is_zp_ws:
             unity = ws.getAxis(1).setUnit("Label")
-            unitx.setLabel('beta' , '')
-            unity.setLabel('sigma' , '')
+            unitx.setLabel('beta', '')
+            unity.setLabel('sigma', '')
         else:
             if name[:4] == 'Beta':
-                unitx.setLabel('beta' , '')
+                unitx.setLabel('beta', '')
             else:
                 unitx.setLabel('sigma', '')
 
@@ -314,7 +317,7 @@ class BayesStretch(PythonAlgorithm):
         energy_min, energy_max = erange
 
         log_names = ['res_file', 'background', 'elastic_peak',
-                     'energy_min', 'energy_max','sample_binning']
+                     'energy_min', 'energy_max', 'sample_binning']
         log_values = [self._res_name, str(self._background), str(self._elastic),
                       energy_min, energy_max, sample_binning]
 
@@ -322,7 +325,7 @@ class BayesStretch(PythonAlgorithm):
         add_log.setProperty('Workspace', workspace)
         add_log.setProperty('LogNames', log_names)
         add_log.setProperty('LogValues', log_values)
-        add_log.setProperty('ParseType', True) # Should determine String/Number type
+        add_log.setProperty('ParseType', True)  # Should determine String/Number type
         add_log.execute()
 
     def _get_properties(self):
@@ -343,4 +346,4 @@ class BayesStretch(PythonAlgorithm):
         self._nbins = [self._sam_bins, 1]
 
 
-AlgorithmFactory.subscribe(BayesStretch)         # Register algorithm with Mantid
+AlgorithmFactory.subscribe(BayesStretch)  # Register algorithm with Mantid
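
As an aside, the _Zp contour data assembled in the loop above comes from splitting the flat
zpout array returned by Quest into one row per sigma value; a self-contained sketch with
dummy data (not the real Fortran output) is:

import numpy as np

nsig, nbet = 3, 4
zpout = np.arange(nsig * nbet, dtype=float)   # stand-in for the flattened Quest result
x_beta = np.linspace(0.0, 1.0, nbet)          # stand-in for xbout[:nbet]

rows = np.split(zpout[:nsig * nbet], nsig)    # one row of nbet points per sigma
data_x = np.concatenate([x_beta] * nsig)
data_y = np.concatenate(rows)
print(data_x.shape, data_y.shape)             # both (12,)
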
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransfer.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransfer.py
index ddea9a7709c946aea7ad1f54587b51d5d8186f25..0e5b3cdf2039fdb48947313277ba54afac6e9598 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransfer.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransfer.py
@@ -2,17 +2,18 @@ from __future__ import (absolute_import, division, print_function)
 
 import os
 import numpy as np
-from mantid.simpleapi import *  # noqa
-from mantid.kernel import *  # noqa
-from mantid.api import *  # noqa
 from mantid import config, mtd, logger
+from mantid.kernel import StringListValidator, Direction
+from mantid.api import PythonAlgorithm, MultipleFileProperty, FileProperty, \
+    WorkspaceGroupProperty, FileAction, Progress
+from mantid.simpleapi import *  # noqa
 
 
 def _ws_or_none(s):
     return mtd[s] if s != '' else None
 
 
-def extract_workspace(ws, ws_out, x_start, x_end):
+def _extract_workspace(ws, ws_out, x_start, x_end):
     """
     Extracts a part of the workspace and
     shifts the x-axis to start from 0
@@ -43,12 +44,14 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
     _red_ws = None
     _psd_int_range = None
     _use_map_file = None
+    _spectrum_axis = None
+    _efixed = None
 
     def category(self):
         return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction"
 
     def summary(self):
-        return 'Performs energy transfer reduction for ILL indirect geometry data, instrument IN16B.'
+        return 'Performs initial energy transfer reduction for ILL indirect geometry data, instrument IN16B.'
 
     def name(self):
         return "IndirectILLEnergyTransfer"
@@ -90,6 +93,10 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
                                                     direction=Direction.Output),
                              doc='Group name for the reduced workspace(s).')
 
+        self.declareProperty(name='SpectrumAxis', defaultValue='SpectrumNumber',
+                             validator=StringListValidator(['SpectrumNumber', '2Theta', 'Q', 'Q2']),
+                             doc='The spectrum axis conversion target.')
+
     def validateInputs(self):
 
         issues = dict()
@@ -113,6 +120,7 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
         self._reflection = self.getPropertyValue('Reflection')
         self._dead_channels = self.getProperty('CropDeadMonitorChannels').value
         self._red_ws = self.getPropertyValue('OutputWorkspace')
+        self._spectrum_axis = self.getPropertyValue('SpectrumAxis')
 
         if self._map_file or (self._psd_int_range[0] == 1 and self._psd_int_range[1] == 128):
             self._use_map_file = True
@@ -280,6 +288,8 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
 
         LoadParameterFile(Workspace=self._ws, Filename=self._parameter_file)
 
+        self._efixed = self._instrument.getNumberParameter('Efixed')[0]
+
         self._setup_run_properties()
 
         if self._mirror_sense == 14:      # two wings, extract left and right
@@ -287,8 +297,8 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
             size = mtd[self._ws].blocksize()
             left = self._ws + '_left'
             right = self._ws + '_right'
-            extract_workspace(self._ws, left, 0, int(size/2))
-            extract_workspace(self._ws, right, int(size/2), size)
+            _extract_workspace(self._ws, left, 0, int(size/2))
+            _extract_workspace(self._ws, right, int(size/2), size)
             DeleteWorkspace(self._ws)
             self._reduce_one_wing(left)
             self._reduce_one_wing(right)
@@ -333,6 +343,18 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
 
         self._convert_to_energy(ws, n_cropped_bins)
 
+        target = None
+        if self._spectrum_axis == '2Theta':
+            target = 'Theta'
+        elif self._spectrum_axis == 'Q':
+            target = 'ElasticQ'
+        elif self._spectrum_axis == 'Q2':
+            target = 'ElasticQSquared'
+
+        if self._spectrum_axis != 'SpectrumNumber':
+            ConvertSpectrumAxis(InputWorkspace=ws, OutputWorkspace=ws,
+                                EMode='Indirect', Target=target, EFixed=self._efixed)
+
     def _group_detectors_with_range(self, ws):
         """
         Groups (sums) the multi-detector's pixels according to given range
@@ -382,6 +404,9 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
             if mtd[int].readY(0)[0] !=0: # this needs to be checked
                 Scale(InputWorkspace=ws, OutputWorkspace=ws, Factor=1. / mtd[int].readY(0)[0])
 
+            # remember the integral of the monitor
+            AddSampleLog(Workspace=ws, LogName="MonitorIntegral", LogType="Number", LogText=str(mtd[int].readY(0)[0]))
+
             DeleteWorkspace(int)
 
         elif self._reduction_type == 'IFWS':
@@ -404,6 +429,9 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
             if mtd[int].readY(0)[0] != 0: # this needs to be checked
                 Scale(InputWorkspace = ws, OutputWorkspace = ws, Factor = 1./mtd[int].readY(0)[0])
 
+            # remember the integral of the monitor
+            AddSampleLog(Workspace=ws, LogName="MonitorIntegral", LogType="Number", LogText=str(mtd[int].readY(0)[0]))
+
             DeleteWorkspace(i1)
             DeleteWorkspace(i2)
             DeleteWorkspace(int)
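
The SpectrumAxis handling added above reduces to a small mapping from the user-facing option
to the ConvertSpectrumAxis target; a plain-Python sketch of just that mapping (the actual
conversion is done through mantid.simpleapi) is:

SPECTRUM_AXIS_TARGETS = {
    '2Theta': 'Theta',
    'Q': 'ElasticQ',
    'Q2': 'ElasticQSquared',
}

def axis_conversion_target(spectrum_axis):
    # Returns None for 'SpectrumNumber', in which case no conversion is performed.
    return SPECTRUM_AXIS_TARGETS.get(spectrum_axis)

assert axis_conversion_target('Q') == 'ElasticQ'
assert axis_conversion_target('SpectrumNumber') is None
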
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionFWS.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionFWS.py
index f9bb92bb73b8caa16eb9d7e8a321d06e797fd5b7..c5d21175b91ffbe997995827973fd6c9d56ce708 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionFWS.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionFWS.py
@@ -1,17 +1,11 @@
 from __future__ import (absolute_import, division, print_function)
 
-
-from mantid.simpleapi import *  # noqa
-from mantid.kernel import *  # noqa
-from mantid.api import *  # noqa
-from mantid import mtd
-import os
 import numpy as np
 import time
-
-
-def _insert_energy_value(ws_name, energy):
-    return ws_name.replace('_red', '_' + str(energy) + '_red')
+from mantid import mtd
+from mantid.kernel import StringListValidator, Direction, FloatBoundedValidator
+from mantid.api import PythonAlgorithm, MultipleFileProperty, FileProperty, FileAction, WorkspaceGroupProperty, Progress
+from mantid.simpleapi import *  # noqa
 
 
 class IndirectILLReductionFWS(PythonAlgorithm):
@@ -35,7 +29,7 @@ class IndirectILLReductionFWS(PythonAlgorithm):
     _all_runs = None
 
     def category(self):
-        return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction"
+        return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction;ILL\\Indirect"
 
     def summary(self):
         return 'Performs fixed-window scan (FWS) multiple file reduction (both elastic and inelastic) ' \
@@ -109,6 +103,10 @@ class IndirectILLReductionFWS(PythonAlgorithm):
                                                     direction=Direction.Output),
                              doc='Output workspace group')
 
+        self.declareProperty(name='SpectrumAxis', defaultValue='SpectrumNumber',
+                             validator=StringListValidator(['SpectrumNumber', '2Theta', 'Q', 'Q2']),
+                             doc='The spectrum axis conversion target.')
+
     def validateInputs(self):
 
         issues = dict()
@@ -125,14 +123,28 @@ class IndirectILLReductionFWS(PythonAlgorithm):
         self._back_scaling = self.getProperty('BackgroundScalingFactor').value
         self._back_option = self.getPropertyValue('BackgroundOption')
         self._calib_option = self.getPropertyValue('CalibrationOption')
+        self._spectrum_axis = self.getPropertyValue('SpectrumAxis')
 
         # arguments to pass to IndirectILLEnergyTransfer
         self._common_args['MapFile'] = self.getPropertyValue('MapFile')
         self._common_args['Analyser'] = self.getPropertyValue('Analyser')
         self._common_args['Reflection'] = self.getPropertyValue('Reflection')
         self._common_args['ManualPSDIntegrationRange'] = self.getProperty('ManualPSDIntegrationRange').value
+        self._common_args['SpectrumAxis'] = self._spectrum_axis
+
+        self._red_ws = self.getPropertyValue('OutputWorkspace')
 
-        self._red_ws = self.getPropertyValue('OutputWorkspace') + '_red'
+        suffix = ''
+        if self._spectrum_axis == 'SpectrumNumber':
+            suffix = '_red'
+        elif self._spectrum_axis == '2Theta':
+            suffix = '_2theta'
+        elif self._spectrum_axis == 'Q':
+            suffix = '_q'
+        elif self._spectrum_axis == 'Q2':
+            suffix = '_q2'
+
+        self._red_ws += suffix
 
         # Nexus metadata criteria for FWS type of data (both EFWS and IFWS)
         self._criteria = '($/entry0/instrument/Doppler/maximum_delta_energy$ == 0. or ' \
@@ -214,9 +226,18 @@ class IndirectILLReductionFWS(PythonAlgorithm):
             right = mtd[groupws].getItem(1).getName()
             sum = '__sum_'+groupws
             Plus(LHSWorkspace=left, RHSWorkspace=right, OutputWorkspace=sum)
+
+            left_monitor = mtd[left].getRun().getLogData('MonitorIntegral').value
+            right_monitor = mtd[right].getRun().getLogData('MonitorIntegral').value
+
+            if left_monitor != 0. and right_monitor != 0.:
+                Scale(InputWorkspace=sum, OutputWorkspace=sum, Factor=0.5)
+
             DeleteWorkspace(left)
             DeleteWorkspace(right)
+
             RenameWorkspace(InputWorkspace=sum, OutputWorkspace=groupws)
+
         else:
             RenameWorkspace(InputWorkspace=mtd[groupws].getItem(0), OutputWorkspace=groupws)
 
@@ -339,7 +360,7 @@ class IndirectILLReductionFWS(PythonAlgorithm):
 
         for energy in self._all_runs[self._SAMPLE]:
             if energy in self._all_runs[label]:
-                ws = _insert_energy_value(self._red_ws, energy) + '_' + label
+                ws = self._insert_energy_value(self._red_ws + '_' + label, energy, label)
                 x_range = mtd[ws].readX(0)[-1] - mtd[ws].readX(0)[0]
                 if mtd[ws].blocksize() > 1:
                     Integration(InputWorkspace=ws, OutputWorkspace=ws)
@@ -354,13 +375,13 @@ class IndirectILLReductionFWS(PythonAlgorithm):
 
         for energy in self._all_runs[self._SAMPLE]:
             if energy in self._all_runs[label]:
-                ref = _insert_energy_value(self._red_ws, energy)
+                ref = self._insert_energy_value(self._red_ws, energy, self._SAMPLE)
                 ws = ref + '_' + label
                 if mtd[ws].blocksize() > 1:
                     SplineInterpolation(WorkspaceToInterpolate=ws,
                                         WorkspaceToMatch=ref,
                                         OutputWorkspace=ws)
-                    # add Linear2Point=True, when ready
+                    # TODO: add Linear2Point=True when ready
 
     def _subtract_background(self):
         '''
@@ -369,7 +390,7 @@ class IndirectILLReductionFWS(PythonAlgorithm):
 
         for energy in self._all_runs[self._SAMPLE]:
             if energy in self._all_runs[self._BACKGROUND]:
-                sample_ws = _insert_energy_value(self._red_ws, energy)
+                sample_ws = self._insert_energy_value(self._red_ws, energy, self._SAMPLE)
                 back_ws = sample_ws + '_' + self._BACKGROUND
                 Minus(LHSWorkspace=sample_ws, RHSWorkspace=back_ws, OutputWorkspace=sample_ws)
             else:
@@ -383,7 +404,7 @@ class IndirectILLReductionFWS(PythonAlgorithm):
 
         for energy in self._all_runs[self._SAMPLE]:
             if energy in self._all_runs[self._CALIBRATION]:
-                sample_ws = _insert_energy_value(self._red_ws, energy)
+                sample_ws = self._insert_energy_value(self._red_ws, energy, self._SAMPLE)
                 calib_ws = sample_ws + '_' + self._CALIBRATION
                 Divide(LHSWorkspace=sample_ws, RHSWorkspace=calib_ws, OutputWorkspace=sample_ws)
                 self._scale_calibration(sample_ws,calib_ws)
@@ -468,7 +489,7 @@ class IndirectILLReductionFWS(PythonAlgorithm):
             ws_list = self._all_runs[label][energy]
             size = len(self._all_runs[label][energy])
 
-            wsname = _insert_energy_value(groupname, energy)
+            wsname = self._insert_energy_value(groupname, energy, label)
 
             togroup.append(wsname)
             nspectra = mtd[ws_list[0]].getNumberHistograms()
@@ -535,5 +556,25 @@ class IndirectILLReductionFWS(PythonAlgorithm):
         else:
             axis.setUnit("Label").setLabel(self._observable, '')
 
+    def _insert_energy_value(self, ws_name, energy, label):
+        '''
+        Inserts the Doppler energy value into the workspace name,
+        between the user input and the automatic suffix.
+        @param ws_name : workspace name
+        @param energy : energy value
+        @param label : sample, background, or calibration
+        @return : the new name with the energy value inserted
+        Example:
+        user_input_2theta -> user_input_1.5_2theta
+        user_input_red_background -> user_input_1.5_red_background
+        '''
+        suffix_pos = ws_name.rfind('_')
+
+        if label != self._SAMPLE:
+            # find second to last underscore
+            suffix_pos = ws_name.rfind('_', 0, suffix_pos)
+
+        return ws_name[:suffix_pos] + '_' + str(energy) + ws_name[suffix_pos:]
+
 # Register algorithm with Mantid
 AlgorithmFactory.subscribe(IndirectILLReductionFWS)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionQENS.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionQENS.py
index 880acb246755fd4dbb41040976cd2e96c8344037..a4eda94cf26a39a3c14b47d28dd13754f29165a1 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionQENS.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionQENS.py
@@ -1,11 +1,13 @@
 from __future__ import (absolute_import, division, print_function)
 
-from mantid.simpleapi import *  # noqa
-from mantid.kernel import *  # noqa
-from mantid.api import *  # noqa
-from mantid import mtd
-import numpy
 import os
+import numpy
+from mantid import mtd
+from mantid.kernel import StringListValidator, Direction, FloatBoundedValidator, \
+    FloatArrayMandatoryValidator, IntBoundedValidator
+from mantid.api import PythonAlgorithm, MultipleFileProperty, FileProperty, \
+    FileAction, WorkspaceGroupProperty, Progress
+from mantid.simpleapi import *  # noqa
 
 
 class IndirectILLReductionQENS(PythonAlgorithm):
@@ -23,9 +25,10 @@ class IndirectILLReductionQENS(PythonAlgorithm):
     _common_args = {}
     _peak_range = []
     _runs = None
+    _spectrum_axis = None
 
     def category(self):
-        return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction"
+        return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction;ILL\\Indirect"
 
     def summary(self):
         return 'Performs quasi-elastic neutron scattering (QENS) multiple file reduction ' \
@@ -111,6 +114,10 @@ class IndirectILLReductionQENS(PythonAlgorithm):
                                                     direction=Direction.Output),
                              doc='Group name for the reduced workspace(s).')
 
+        self.declareProperty(name='SpectrumAxis', defaultValue='SpectrumNumber',
+                             validator=StringListValidator(['SpectrumNumber', '2Theta', 'Q', 'Q2']),
+                             doc='The spectrum axis conversion target.')
+
     def validateInputs(self):
 
         issues = dict()
@@ -141,8 +148,21 @@ class IndirectILLReductionQENS(PythonAlgorithm):
         self._unmirror_option = self.getProperty('UnmirrorOption').value
         self._back_scaling = self.getProperty('BackgroundScalingFactor').value
         self._peak_range = self.getProperty('CalibrationPeakRange').value
+        self._spectrum_axis = self.getPropertyValue('SpectrumAxis')
+
+        self._red_ws = self.getPropertyValue('OutputWorkspace')
+
+        suffix = ''
+        if self._spectrum_axis == 'SpectrumNumber':
+            suffix = '_red'
+        elif self._spectrum_axis == '2Theta':
+            suffix = '_2theta'
+        elif self._spectrum_axis == 'Q':
+            suffix = '_q'
+        elif self._spectrum_axis == 'Q2':
+            suffix = '_q2'
 
-        self._red_ws = self.getPropertyValue('OutputWorkspace') + '_red'
+        self._red_ws += suffix
 
         # arguments to pass to IndirectILLEnergyTransfer
         self._common_args['MapFile'] = self.getPropertyValue('MapFile')
@@ -150,6 +170,7 @@ class IndirectILLReductionQENS(PythonAlgorithm):
         self._common_args['Reflection'] = self.getPropertyValue('Reflection')
         self._common_args['ManualPSDIntegrationRange'] = self.getProperty('ManualPSDIntegrationRange').value
         self._common_args['CropDeadMonitorChannels'] = self.getProperty('CropDeadMonitorChannels').value
+        self._common_args['SpectrumAxis'] = self._spectrum_axis
 
         if self._sum_all_runs is True:
             self.log().notice('All the sample runs will be summed')
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCalculateTransmission.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCalculateTransmission.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8c247dd2658a03233b01799079e367e0ccc1152
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCalculateTransmission.py
@@ -0,0 +1,372 @@
+# pylint: disable=invalid-name
+
+""" SANSCalculateTransmission algorithm calculates the transmission correction of a SANS workspace."""
+from __future__ import (absolute_import, division, print_function)
+from mantid.kernel import (Direction, StringListValidator, PropertyManagerProperty)
+from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_unmanaged_algorithm
+from sans.common.enums import (RangeStepType, RebinType, FitType, DataType)
+from sans.state.state_base import create_deserialized_sans_state_from_property_manager
+from sans.algorithm_detail.calculate_transmission_helper import (get_detector_id_for_spectrum_number,
+                                                                 get_workspace_indices_for_monitors,
+                                                                 apply_flat_background_correction_to_monitors,
+                                                                 apply_flat_background_correction_to_detectors,
+                                                                 get_region_of_interest)
+
+
+class SANSCalculateTransmission(DataProcessorAlgorithm):
+    def category(self):
+        return 'SANS\\Adjust'
+
+    def summary(self):
+        return 'Calculates the transmission for a SANS reduction.'
+
+    def PyInit(self):
+        # State
+        self.declareProperty(PropertyManagerProperty('SANSState'),
+                             doc='A property manager which fulfills the SANSState contract.')
+
+        # Input workspace in TOF
+        self.declareProperty(MatrixWorkspaceProperty("TransmissionWorkspace", '',
+                                                     optional=PropertyMode.Mandatory, direction=Direction.Input),
+                             doc='The transmission workspace in time-of-flight units.')
+        self.declareProperty(MatrixWorkspaceProperty("DirectWorkspace", '',
+                                                     optional=PropertyMode.Mandatory, direction=Direction.Input),
+                             doc='The direct workspace in time-of-flight units.')
+        allowed_data = StringListValidator([DataType.to_string(DataType.Sample),
+                                            DataType.to_string(DataType.Can)])
+        self.declareProperty("DataType", DataType.to_string(DataType.Sample),
+                             validator=allowed_data, direction=Direction.Input,
+                             doc="The component of the instrument which is to be reduced.")
+
+        # Output workspace
+        self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", '', direction=Direction.Output),
+                             doc='A calculated transmission workspace in units of wavelength.')
+        self.declareProperty(MatrixWorkspaceProperty("UnfittedData", '', direction=Direction.Output),
+                             doc='The unfitted transmission data in units of wavelength.')
+
+    def PyExec(self):
+        # Read the state
+        state_property_manager = self.getProperty("SANSState").value
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+        calculate_transmission_state = state.adjustment.calculate_transmission
+        # The calculation of the transmission has the following steps:
+        # 1. Get all spectrum numbers which take part in the transmission calculation
+        # 2. Clean up the transmission and direct workspaces, i.e. prompt peak correction, flat background correction,
+        #    wavelength conversion and rebinning of the data.
+        # 3. Run the CalculateTransmission algorithm
+        transmission_workspace = self.getProperty("TransmissionWorkspace").value
+        direct_workspace = self.getProperty("DirectWorkspace").value
+        incident_monitor_spectrum_number = calculate_transmission_state.incident_monitor
+        if incident_monitor_spectrum_number is None:
+            incident_monitor_spectrum_number = calculate_transmission_state.default_incident_monitor
+
+        # 1. Get relevant spectra
+        detector_id_incident_monitor = get_detector_id_for_spectrum_number(transmission_workspace,
+                                                                           incident_monitor_spectrum_number)
+        detector_ids_roi, detector_id_transmission_monitor, detector_id_default_transmission_monitor = \
+            self._get_detector_ids_for_transmission_calculation(transmission_workspace, calculate_transmission_state)
+        all_detector_ids = [detector_id_incident_monitor]
+
+        if len(detector_ids_roi) > 0:
+            all_detector_ids.extend(detector_ids_roi)
+        elif detector_id_transmission_monitor is not None:
+            all_detector_ids.append(detector_id_transmission_monitor)
+        elif detector_id_default_transmission_monitor is not None:
+            all_detector_ids.append(detector_id_default_transmission_monitor)
+        else:
+            raise RuntimeError("SANSCalculateTransmission: No region of interest or transmission monitor selected.")
+
+        # 2. Clean transmission data
+        data_type_string = self.getProperty("DataType").value
+        data_type = DataType.from_string(data_type_string)
+        transmission_workspace = self._get_corrected_wavelength_workspace(transmission_workspace, all_detector_ids,
+                                                                          calculate_transmission_state)
+        direct_workspace = self._get_corrected_wavelength_workspace(direct_workspace, all_detector_ids,
+                                                                    calculate_transmission_state)
+
+        # 3. Fit
+        output_workspace, unfitted_transmission_workspace = \
+            self._perform_fit(transmission_workspace, direct_workspace, detector_ids_roi,
+                              detector_id_transmission_monitor, detector_id_default_transmission_monitor,
+                              detector_id_incident_monitor, calculate_transmission_state, data_type)
+
+        self.setProperty("OutputWorkspace", output_workspace)
+        if unfitted_transmission_workspace:
+            self.setProperty("UnfittedData", unfitted_transmission_workspace)
+
+    def _perform_fit(self, transmission_workspace, direct_workspace,
+                     transmission_roi_detector_ids, transmission_monitor_detector_id,
+                     transmission_monitor_detector_id_default, incident_monitor_detector_id,
+                     calculate_transmission_state, data_type):
+        """
+        This performs the actual transmission calculation.
+
+        :param transmission_workspace: the corrected transmission workspace
+        :param direct_workspace: the corrected direct workspace
+        :param transmission_roi_detector_ids: the roi detector ids
+        :param transmission_monitor_detector_id: the transmission monitor detector id
+        :param transmission_monitor_detector_id_default: the default transmission monitor id
+        :param incident_monitor_detector_id: the incident monitor id
+        :param calculate_transmission_state: the state for the transmission calculation
+        :param data_type: the data type which is currently being investigated, i.e. a sample or a can run.
+        :return: a fitted workspace and an unfitted workspace
+        """
+
+        wavelength_low = calculate_transmission_state.wavelength_low
+        wavelength_high = calculate_transmission_state.wavelength_high
+        wavelength_step = calculate_transmission_state.wavelength_step
+        wavelength_step_type = calculate_transmission_state.wavelength_step_type
+        prefix = 1.0 if wavelength_step_type is RangeStepType.Lin else -1.0
+        wavelength_step *= prefix
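+        # following the Mantid Rebin convention, a negative step requests logarithmic binning,
+        # e.g. "1.0,-0.05,10.0", while a positive step gives linear bins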
+        rebin_params = str(wavelength_low) + "," + str(wavelength_step) + "," + str(wavelength_high)
+
+        trans_name = "CalculateTransmission"
+        trans_options = {"SampleRunWorkspace": transmission_workspace,
+                         "DirectRunWorkspace": direct_workspace,
+                         "OutputWorkspace": EMPTY_NAME,
+                         "IncidentBeamMonitor": incident_monitor_detector_id,
+                         "RebinParams": rebin_params,
+                         "OutputUnfittedData": True}
+
+        # If we have a region of interest we use it, otherwise we fall back to the transmission monitor
+
+        if len(transmission_roi_detector_ids) > 0:
+            trans_options.update({"TransmissionROI": transmission_roi_detector_ids})
+        elif transmission_monitor_detector_id is not None:
+            trans_options.update({"TransmissionMonitor": transmission_monitor_detector_id})
+        elif transmission_monitor_detector_id_default is not None:
+            trans_options.update({"TransmissionMonitor": transmission_monitor_detector_id_default})
+        else:
+            raise RuntimeError("No transmission monitor has been provided.")
+
+        # Get the fit setting for the correct data type, i.e. either for the Sample or the Can
+        fit_type = calculate_transmission_state.fit[DataType.to_string(data_type)].fit_type
+        if fit_type is FitType.Log:
+            fit_string = "Log"
+        elif fit_type is FitType.Polynomial:
+            fit_string = "Polynomial"
+        else:
+            fit_string = "Linear"
+
+        trans_options.update({"FitMethod": fit_string})
+        if fit_type is FitType.Polynomial:
+            polynomial_order = calculate_transmission_state.fit[DataType.to_string(data_type)].polynomial_order
+            trans_options.update({"PolynomialOrder": polynomial_order})
+
+        trans_alg = create_unmanaged_algorithm(trans_name, **trans_options)
+        trans_alg.execute()
+
+        fitted_transmission_workspace = trans_alg.getProperty("OutputWorkspace").value
+        try:
+            unfitted_transmission_workspace = trans_alg.getProperty("UnfittedData").value
+        except RuntimeError:
+            unfitted_transmission_workspace = None
+
+        # Set the y label correctly for the fitted and unfitted transmission workspaces
+        y_unit_label_transmission_ratio = "Transmission"
+        if fitted_transmission_workspace:
+            fitted_transmission_workspace.setYUnitLabel(y_unit_label_transmission_ratio)
+        if unfitted_transmission_workspace:
+            unfitted_transmission_workspace.setYUnitLabel(y_unit_label_transmission_ratio)
+
+        if fit_type is FitType.NoFit:
+            output_workspace = unfitted_transmission_workspace
+        else:
+            output_workspace = fitted_transmission_workspace
+        return output_workspace, unfitted_transmission_workspace
+
+    def _get_detector_ids_for_transmission_calculation(self, transmission_workspace, calculate_transmission_state):
+        """
+        Get the detector ids which participate in the transmission calculation.
+
+        This can come either from a ROI/MASK/RADIUS selection or from a transmission monitor, not both.
+        :param transmission_workspace: the transmission workspace.
+        :param calculate_transmission_state: a SANSStateCalculateTransmission object.
+        :return: a list of ROI detector ids (possibly empty), the transmission monitor detector id and the
+                 default transmission monitor detector id; the monitor ids can be None
+        """
+        # Get the potential ROI detector ids
+        transmission_radius = calculate_transmission_state.transmission_radius_on_detector
+        transmission_roi = calculate_transmission_state.transmission_roi_files
+        transmission_mask = calculate_transmission_state.transmission_mask_files
+        detector_ids_roi = get_region_of_interest(transmission_workspace, transmission_radius, transmission_roi,
+                                                  transmission_mask)
+
+        # Get the potential transmission monitor detector id
+        transmission_monitor_spectrum_number = calculate_transmission_state.transmission_monitor
+        detector_id_transmission_monitor = None
+        if transmission_monitor_spectrum_number is not None:
+            detector_id_transmission_monitor = get_detector_id_for_spectrum_number(transmission_workspace,
+                                                                                   transmission_monitor_spectrum_number)
+
+        # Get the default transmission monitor detector id. This is our fallback if nothing else was specified.
+        default_transmission_monitor = calculate_transmission_state.default_transmission_monitor
+
+        detector_id_default_transmission_monitor = get_detector_id_for_spectrum_number(transmission_workspace,
+                                                                                       default_transmission_monitor)
+
+        return detector_ids_roi, detector_id_transmission_monitor, detector_id_default_transmission_monitor
+
+    def _get_corrected_wavelength_workspace(self, workspace, detector_ids, calculate_transmission_state):
+        """
+        Performs a prompt peak correction, a background correction, converts to wavelength and rebins.
+
+        :param workspace: the workspace which is being corrected.
+        :param detector_ids: a list of relevant detector ids
+        :param calculate_transmission_state: a SANSStateCalculateTransmission state
+        :return: a corrected workspace.
+        """
+        # Extract the relevant spectra. These include
+        # 1. The incident monitor spectrum
+        # 2. The transmission spectra, be it monitor or ROI based.
+        # A previous implementation of this code had a comment which suggested
+        # that we have to exclude unused spectra as the interpolation runs into
+        # problems if we don't.
+        extract_name = "ExtractSpectra"
+        extract_options = {"InputWorkspace": workspace,
+                           "OutputWorkspace": EMPTY_NAME,
+                           "DetectorList": detector_ids}
+        extract_alg = create_unmanaged_algorithm(extract_name, **extract_options)
+        extract_alg.execute()
+        workspace = extract_alg.getProperty("OutputWorkspace").value
+
+        # Make sure that we still have spectra in the workspace
+        if workspace.getNumberHistograms() == 0:
+            raise RuntimeError("SANSCalculateTransmissionCorrection: The transmission workspace does "
+                               "not seem to have any spectra.")
+
+        # ----------------------------------
+        # Perform the prompt peak correction
+        # ----------------------------------
+        prompt_peak_correction_min = calculate_transmission_state.prompt_peak_correction_min
+        prompt_peak_correction_max = calculate_transmission_state.prompt_peak_correction_max
+        prompt_peak_correction_enabled = calculate_transmission_state.prompt_peak_correction_enabled
+        workspace = self._perform_prompt_peak_correction(workspace, prompt_peak_correction_min,
+                                                         prompt_peak_correction_max, prompt_peak_correction_enabled)
+
+        # ---------------------------------------
+        # Perform the flat background correction
+        # ---------------------------------------
+        # The flat background correction has two parts:
+        # 1. Corrections on monitors
+        # 2. Corrections on regular detectors
+
+        # Monitor flat background correction
+        workspace_indices_of_monitors = list(get_workspace_indices_for_monitors(workspace))
+        background_tof_monitor_start = calculate_transmission_state.background_TOF_monitor_start
+        background_tof_monitor_stop = calculate_transmission_state.background_TOF_monitor_stop
+        background_tof_general_start = calculate_transmission_state.background_TOF_general_start
+        background_tof_general_stop = calculate_transmission_state.background_TOF_general_stop
+        workspace = apply_flat_background_correction_to_monitors(workspace,
+                                                                 workspace_indices_of_monitors,
+                                                                 background_tof_monitor_start,
+                                                                 background_tof_monitor_stop,
+                                                                 background_tof_general_start,
+                                                                 background_tof_general_stop)
+
+        # Detector flat background correction
+        flat_background_correction_start = calculate_transmission_state.background_TOF_roi_start
+        flat_background_correction_stop = calculate_transmission_state.background_TOF_roi_stop
+        workspace = apply_flat_background_correction_to_detectors(workspace, flat_background_correction_start,
+                                                                  flat_background_correction_stop)
+
+        # ---------------------------------------
+        # Convert to wavelength and rebin
+        # ---------------------------------------
+        # The wavelength setting is reasonably complex.
+        # 1. Use full wavelength range
+        # 2. Use standard settings
+        if calculate_transmission_state.use_full_wavelength_range:
+            wavelength_low = calculate_transmission_state.wavelength_full_range_low
+            wavelength_high = calculate_transmission_state.wavelength_full_range_high
+        else:
+            wavelength_low = calculate_transmission_state.wavelength_low
+            wavelength_high = calculate_transmission_state.wavelength_high
+
+        wavelength_step = calculate_transmission_state.wavelength_step
+        rebin_type = calculate_transmission_state.rebin_type
+        wavelength_step_type = calculate_transmission_state.wavelength_step_type
+
+        convert_name = "SANSConvertToWavelengthAndRebin"
+        convert_options = {"InputWorkspace": workspace,
+                           "WavelengthLow": wavelength_low,
+                           "WavelengthHigh": wavelength_high,
+                           "WavelengthStep": wavelength_step,
+                           "WavelengthStepType": RangeStepType.to_string(wavelength_step_type),
+                           "RebinMode": RebinType.to_string(rebin_type)}
+        convert_alg = create_unmanaged_algorithm(convert_name, **convert_options)
+        convert_alg.setPropertyValue("OutputWorkspace", EMPTY_NAME)
+        convert_alg.setProperty("OutputWorkspace", workspace)
+        convert_alg.execute()
+        return convert_alg.getProperty("OutputWorkspace").value
+
+    def _perform_prompt_peak_correction(self, workspace, prompt_peak_correction_min, prompt_peak_correction_max,
+                                        prompt_peak_correction_enabled):
+        """
+        Prompt peak correction is performed if it is explicitly set by the user.
+
+        :param workspace: the workspace to correct.
+        :param prompt_peak_correction_min: the start time for the prompt peak correction.
+        :param prompt_peak_correction_max: the stop time for the prompt peak correction.
+        :param prompt_peak_correction_enabled: flag indicating whether the prompt peak correction should be applied.
+        :return: a corrected workspace.
+        """
+        # We only perform the prompt peak correction if the start and stop values of the bins we want to remove
+        # were explicitly set. Some instruments require it, others don't.
+        if prompt_peak_correction_enabled and prompt_peak_correction_min is not None and \
+                        prompt_peak_correction_max is not None:  # noqa
+            remove_name = "RemoveBins"
+            remove_options = {"InputWorkspace": workspace,
+                              "XMin": prompt_peak_correction_min,
+                              "XMax": prompt_peak_correction_max,
+                              "Interpolation": "Linear"}
+            remove_alg = create_unmanaged_algorithm(remove_name, **remove_options)
+            remove_alg.setPropertyValue("OutputWorkspace", EMPTY_NAME)
+            remove_alg.setProperty("OutputWorkspace", workspace)
+            remove_alg.execute()
+            workspace = remove_alg.getProperty("OutputWorkspace").value
+        return workspace
+
+    def validateInputs(self):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        state_property_manager = self.getProperty("SANSState").value
+        try:
+            state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+            state.property_manager = state_property_manager
+            state.validate()
+        except ValueError as err:
+            errors.update({"SANSCalculateTransmission": str(err)})
+            state = None
+
+        if state is not None:
+            transmission_workspace = self.getProperty("TransmissionWorkspace").value
+            calculate_transmission_state = state.adjustment.calculate_transmission
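+            # getIndexFromSpectrumNumber raises a RuntimeError if the spectrum number is not present
+            # in the workspace, which is what the check below relies on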
+            try:
+                incident_monitor = calculate_transmission_state.incident_monitor
+                if incident_monitor is None:
+                    incident_monitor = calculate_transmission_state.default_incident_monitor
+                transmission_workspace.getIndexFromSpectrumNumber(incident_monitor)
+            except RuntimeError:
+                errors.update({"IncidentMonitorSpectrumNumber": "The spectrum number for the incident monitor spectrum "
+                                                                "does not seem to exist for the transmission"
+                                                                " workspace."})
+
+        if state is not None:
+            calculate_transmission_state = state.adjustment.calculate_transmission
+            fit = calculate_transmission_state.fit
+            data_type_string = self.getProperty("DataType").value
+            data_type = DataType.from_string(data_type_string)
+            sample = fit[DataType.to_string(DataType.Sample)]
+            can = fit[DataType.to_string(DataType.Can)]
+            if data_type is DataType.Sample and sample.fit_type is None:
+                errors.update({"DataType": "There does not seem to be a fit type set for the selected data type"})
+            if data_type is DataType.Can and can.fit_type is None:
+                errors.update({"DataType": "There does not seem to be a fit type set for the selected data type"})
+
+        return errors
+
+
+# Register algorithm with Mantid
+AlgorithmFactory.subscribe(SANSCalculateTransmission)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSConvertToQ.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSConvertToQ.py
index 89595d544000e6f35fa751938ebfb3fe3f54f04b..38615003d775ce7f04dc3d9c359dc928b81246a3 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSConvertToQ.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSConvertToQ.py
@@ -139,8 +139,8 @@ class SANSConvertToQ(DataProcessorAlgorithm):
 
         Note that it does not perform any q resolution calculation, nor any wavelength-and-pixel adjustment. The
         output workspace contains two numerical axes.
-        @param state: a SANSState object
-        @return: the reduced workspace, the sum of counts workspace, the sum of norms workspace or
+        :param state: a SANSState object
+        :return: the reduced workspace, the sum of counts workspace, the sum of norms workspace or
                  the reduced workspace, None, None
         """
         data_workspace = self.getProperty("InputWorkspace").value
@@ -200,8 +200,8 @@ class SANSConvertToQ(DataProcessorAlgorithm):
     def _set_partial_workspaces(self, sum_of_counts_workspace, sum_of_norms_workspace):
         """
         Sets the partial output, ie the sum of the counts workspace and the sum of the normalization workspace
-        @param sum_of_counts_workspace: the sum of the counts workspace
-        @param sum_of_norms_workspace: the sum of the normalization workspace
+        :param sum_of_counts_workspace: the sum of the counts workspace
+        :param sum_of_norms_workspace: the sum of the normalization workspace
         """
         self.declareProperty(MatrixWorkspaceProperty("SumOfCounts", '',
                                                      optional=PropertyMode.Optional, direction=Direction.Output),
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCreateAdjustmentWorkspaces.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCreateAdjustmentWorkspaces.py
new file mode 100644
index 0000000000000000000000000000000000000000..fccdeb1f7cb7ff901c740a86e12267fcda8ff18c
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCreateAdjustmentWorkspaces.py
@@ -0,0 +1,230 @@
+# pylint: disable=invalid-name
+
+""" SANSCreateAdjustmentWorkspaces algorithm creates workspaces for pixel adjustment
+    , wavelength adjustment and pixel-and-wavelength adjustment workspaces.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+from mantid.kernel import (Direction, PropertyManagerProperty, StringListValidator, CompositeValidator)
+from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode,
+                        WorkspaceUnitValidator)
+
+from sans.common.constants import EMPTY_NAME
+from sans.common.enums import (DataType, DetectorType)
+from sans.common.general_functions import create_unmanaged_algorithm
+from sans.state.state_base import create_deserialized_sans_state_from_property_manager
+
+
+class SANSCreateAdjustmentWorkspaces(DataProcessorAlgorithm):
+    def category(self):
+        return 'SANS\\Adjust'
+
+    def summary(self):
+        return 'Calculates wavelength adjustment, pixel adjustment and ' \
+               'wavelength-and-pixel adjustment workspaces.'
+
+    def PyInit(self):
+        # ---------------
+        # INPUT
+        # ---------------
+        # State
+        self.declareProperty(PropertyManagerProperty('SANSState'),
+                             doc='A property manager which fulfills the SANSState contract.')
+
+        # Input workspaces
+        self.declareProperty(MatrixWorkspaceProperty('TransmissionWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The transmission workspace.')
+        self.declareProperty(MatrixWorkspaceProperty('DirectWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The direct workspace.')
+        self.declareProperty(MatrixWorkspaceProperty('MonitorWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The scatter monitor workspace. This workspace only contains monitors.')
+
+        workspace_validator = CompositeValidator()
+        workspace_validator.add(WorkspaceUnitValidator("Wavelength"))
+        self.declareProperty(MatrixWorkspaceProperty('SampleData', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input,
+                                                     validator=workspace_validator),
+                             doc='A workspace cropped to the detector to be reduced (the SAME as the input to Q1D). '
+                                 'This is used to verify the solid angle. The workspace is not modified, '
+                                 'just inspected.')
+
+        # The component
+        allowed_detector_types = StringListValidator([DetectorType.to_string(DetectorType.HAB),
+                                                      DetectorType.to_string(DetectorType.LAB)])
+        self.declareProperty("Component", DetectorType.to_string(DetectorType.LAB),
+                             validator=allowed_detector_types, direction=Direction.Input,
+                             doc="The component of the instrument which is currently being investigated.")
+
+        # The data type
+        allowed_data = StringListValidator([DataType.to_string(DataType.Sample),
+                                            DataType.to_string(DataType.Can)])
+        self.declareProperty("DataType", DataType.to_string(DataType.Sample),
+                             validator=allowed_data, direction=Direction.Input,
+                             doc="The component of the instrument which is to be reduced.")
+
+        # Slice factor for monitor
+        self.declareProperty('SliceEventFactor', 1.0, direction=Direction.Input,
+                             doc='The slice factor for the monitor normalization. This factor is the one '
+                                 'obtained from event slicing.')
+
+        # ---------------
+        # Output
+        # ---------------
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceWavelengthAdjustment', '',
+                                                     direction=Direction.Output),
+                             doc='The workspace for wavelength-based adjustments.')
+
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspacePixelAdjustment', '',
+                                                     direction=Direction.Output),
+                             doc='The workspace for pixel-based adjustments.')
+
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceWavelengthAndPixelAdjustment', '',
+                                                     direction=Direction.Output),
+                             doc='The workspace for, both, wavelength- and pixel-based adjustments.')
+
+    def PyExec(self):
+        # Read the state
+        state_property_manager = self.getProperty("SANSState").value
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+
+        # --------------------------------------
+        # Get the monitor normalization workspace
+        # --------------------------------------
+        monitor_normalization_workspace = self._get_monitor_normalization_workspace(state)
+
+        # --------------------------------------
+        # Get the calculated transmission
+        # --------------------------------------
+        calculated_transmission_workspace, unfitted_transmission_workspace =\
+            self._get_calculated_transmission_workspace(state)
+
+        # --------------------------------------
+        # Get the wide angle correction workspace
+        # --------------------------------------
+        wave_length_and_pixel_adjustment_workspace = self._get_wide_angle_correction_workspace(state,
+                                                                   calculated_transmission_workspace)  # noqa
+
+        # --------------------------------------------
+        # Get the full wavelength and pixel adjustment
+        # --------------------------------------------
+        wave_length_adjustment_workspace, \
+        pixel_length_adjustment_workspace = self._get_wavelength_and_pixel_adjustment_workspaces(state,
+                                                                            monitor_normalization_workspace,  # noqa
+                                                                            calculated_transmission_workspace)  # noqa
+
+        if wave_length_adjustment_workspace:
+            self.setProperty("OutputWorkspaceWavelengthAdjustment", wave_length_adjustment_workspace)
+        if pixel_length_adjustment_workspace:
+            self.setProperty("OutputWorkspacePixelAdjustment", pixel_length_adjustment_workspace)
+        if wave_length_and_pixel_adjustment_workspace:
+            self.setProperty("OutputWorkspaceWavelengthAndPixelAdjustment", wave_length_and_pixel_adjustment_workspace)
+
+        # TODO: Nice to have: Provide diagnostic output workspaces which could be output either directly to the
+        #                     ADS or let it percolate up via SANSCreateAdjustmentWorkspaces->SANSReductionCore->
+        #                     SANSSingleReduction and then add it to the ADS
+
+    def _get_wavelength_and_pixel_adjustment_workspaces(self, state,
+                                                        monitor_normalization_workspace,
+                                                        calculated_transmission_workspace):
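+        """
+        Runs SANSCreateWavelengthAndPixelAdjustment for the selected component.
+
+        :param state: a SANSState object.
+        :param monitor_normalization_workspace: the monitor normalization workspace.
+        :param calculated_transmission_workspace: the calculated transmission workspace (can be None).
+        :return: the wavelength adjustment workspace and the pixel adjustment workspace.
+        """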
+        component = self.getProperty("Component").value
+
+        wave_pixel_adjustment_name = "SANSCreateWavelengthAndPixelAdjustment"
+        serialized_state = state.property_manager
+        wave_pixel_adjustment_options = {"SANSState": serialized_state,
+                                         "NormalizeToMonitorWorkspace": monitor_normalization_workspace,
+                                         "OutputWorkspaceWavelengthAdjustment": EMPTY_NAME,
+                                         "OutputWorkspacePixelAdjustment": EMPTY_NAME,
+                                         "Component": component}
+        if calculated_transmission_workspace:
+            wave_pixel_adjustment_options.update({"TransmissionWorkspace": calculated_transmission_workspace})
+        wave_pixel_adjustment_alg = create_unmanaged_algorithm(wave_pixel_adjustment_name,
+                                                               **wave_pixel_adjustment_options)
+
+        wave_pixel_adjustment_alg.execute()
+        wavelength_out = wave_pixel_adjustment_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
+        pixel_out = wave_pixel_adjustment_alg.getProperty("OutputWorkspacePixelAdjustment").value
+        return wavelength_out, pixel_out
+
+    def _get_monitor_normalization_workspace(self, state):
+        """
+        Gets the monitor normalization workspace via the SANSNormalizeToMonitor algorithm
+
+        :param state: a SANSState object.
+        :return: the normalization workspace.
+        """
+        monitor_workspace = self.getProperty("MonitorWorkspace").value
+        scale_factor = self.getProperty("SliceEventFactor").value
+
+        normalize_name = "SANSNormalizeToMonitor"
+        serialized_state = state.property_manager
+        normalize_option = {"InputWorkspace": monitor_workspace,
+                            "OutputWorkspace": EMPTY_NAME,
+                            "SANSState": serialized_state,
+                            "ScaleFactor": scale_factor}
+        normalize_alg = create_unmanaged_algorithm(normalize_name, **normalize_option)
+        normalize_alg.execute()
+        ws = normalize_alg.getProperty("OutputWorkspace").value
+        return ws
+
+    def _get_calculated_transmission_workspace(self, state):
+        """
+        Creates the fitted transmission workspace.
+
+        Note that this step is not mandatory. If no transmission and direct workspaces are provided, then we
+        don't have to do anything here.
+        :param state: a SANSState object.
+        :return: a fitted transmission workspace and the unfitted data.
+        """
+        transmission_workspace = self.getProperty("TransmissionWorkspace").value
+        direct_workspace = self.getProperty("DirectWorkspace").value
+        if transmission_workspace and direct_workspace:
+            data_type = self.getProperty("DataType").value
+            transmission_name = "SANSCalculateTransmission"
+            serialized_state = state.property_manager
+            transmission_options = {"TransmissionWorkspace": transmission_workspace,
+                                    "DirectWorkspace": direct_workspace,
+                                    "SANSState": serialized_state,
+                                    "DataType": data_type,
+                                    "OutputWorkspace": EMPTY_NAME,
+                                    "UnfittedData": EMPTY_NAME}
+            transmission_alg = create_unmanaged_algorithm(transmission_name, **transmission_options)
+            transmission_alg.execute()
+            fitted_data = transmission_alg.getProperty("OutputWorkspace").value
+            unfitted_data = transmission_alg.getProperty("UnfittedData").value
+        else:
+            fitted_data = None
+            unfitted_data = None
+        return fitted_data, unfitted_data
+
+    def _get_wide_angle_correction_workspace(self, state, calculated_transmission_workspace):
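+        """
+        Runs SANSWideAngleCorrection if the wide angle correction is enabled and both the sample data
+        and a calculated transmission workspace are available; otherwise returns None.
+
+        :param state: a SANSState object.
+        :param calculated_transmission_workspace: the calculated transmission workspace (can be None).
+        :return: the wide angle correction workspace or None.
+        """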
+        wide_angle_correction = state.adjustment.wide_angle_correction
+        sample_data = self.getProperty("SampleData").value
+        workspace = None
+        if wide_angle_correction and sample_data and calculated_transmission_workspace:
+            wide_angle_name = "SANSWideAngleCorrection"
+            wide_angle_options = {"SampleData": sample_data,
+                                  "TransmissionData": calculated_transmission_workspace,
+                                  "OutputWorkspace": EMPTY_NAME}
+            wide_angle_alg = create_unmanaged_algorithm(wide_angle_name, **wide_angle_options)
+            wide_angle_alg.execute()
+            workspace = wide_angle_alg.getProperty("OutputWorkspace").value
+        return workspace
+
+    def validateInputs(self):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        state_property_manager = self.getProperty("SANSState").value
+        try:
+            state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+            state.property_manager = state_property_manager
+            state.validate()
+        except ValueError as err:
+            errors.update({"SANSCreateAdjustmentWorkspaces": str(err)})
+        return errors
+
+
+# Register algorithm with Mantid
+AlgorithmFactory.subscribe(SANSCreateAdjustmentWorkspaces)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCreateWavelengthAndPixelAdjustment.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCreateWavelengthAndPixelAdjustment.py
new file mode 100644
index 0000000000000000000000000000000000000000..72326412ceab19ecc30ede61b3774002e1f2a656
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCreateWavelengthAndPixelAdjustment.py
@@ -0,0 +1,230 @@
+# pylint: disable=invalid-name
+
+""" SANSCreateWavelengthAndPixelAdjustment algorithm creates workspaces for pixel adjustment
+    and wavelength adjustment.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+from mantid.kernel import (Direction, StringListValidator, PropertyManagerProperty, CompositeValidator)
+
+from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode,
+                        WorkspaceUnitValidator)
+
+from sans.state.state_base import create_deserialized_sans_state_from_property_manager
+from sans.common.enums import (RangeStepType, DetectorType)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_unmanaged_algorithm
+
+
+class SANSCreateWavelengthAndPixelAdjustment(DataProcessorAlgorithm):
+    def category(self):
+        return 'SANS\\Adjust'
+
+    def summary(self):
+        return 'Calculates wavelength adjustment and pixel adjustment workspaces.'
+
+    def PyInit(self):
+        # State
+        self.declareProperty(PropertyManagerProperty('SANSState'),
+                             doc='A property manager which fulfills the SANSState contract.')
+        # Input workspaces
+        workspace_validator = CompositeValidator()
+        workspace_validator.add(WorkspaceUnitValidator("Wavelength"))
+
+        self.declareProperty(MatrixWorkspaceProperty("TransmissionWorkspace", '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input,
+                                                     validator=workspace_validator),
+                             doc='The calculated transmission workspace in wavelength units.')
+        self.declareProperty(MatrixWorkspaceProperty("NormalizeToMonitorWorkspace", '',
+                                                     optional=PropertyMode.Mandatory, direction=Direction.Input,
+                                                     validator=workspace_validator),
+                             doc='The monitor normalization workspace in wavelength units.')
+        allowed_detector_types = StringListValidator([DetectorType.to_string(DetectorType.HAB),
+                                                      DetectorType.to_string(DetectorType.LAB)])
+        self.declareProperty("Component", DetectorType.to_string(DetectorType.LAB),
+                             validator=allowed_detector_types, direction=Direction.Input,
+                             doc="The component of the instrument which is currently being investigated.")
+
+        # Output workspace
+        self.declareProperty(MatrixWorkspaceProperty("OutputWorkspaceWavelengthAdjustment", '',
+                                                     direction=Direction.Output),
+                             doc='A wavelength adjustment output workspace.')
+        self.declareProperty(MatrixWorkspaceProperty("OutputWorkspacePixelAdjustment", '',
+                                                     direction=Direction.Output),
+                             doc='A pixel adjustment output workspace.')
+
+    def PyExec(self):
+        # Read the state
+        state_property_manager = self.getProperty("SANSState").value
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+        wavelength_and_pixel_adjustment_state = state.adjustment.wavelength_and_pixel_adjustment
+
+        # Get the wavelength adjustment workspace
+        transmission_workspace = self.getProperty("TransmissionWorkspace").value
+        monitor_normalization_workspace = self.getProperty("NormalizeToMonitorWorkspace").value
+
+        component = self.getProperty("Component").value
+        wavelength_adjustment_file = wavelength_and_pixel_adjustment_state.adjustment_files[component].wavelength_adjustment_file
+
+        rebin_string = self._get_rebin_string(wavelength_and_pixel_adjustment_state)
+        wavelength_adjustment_workspace = self._get_wavelength_adjustment_workspace(wavelength_adjustment_file,
+                                                                                    transmission_workspace,
+                                                                                    monitor_normalization_workspace,
+                                                                                    rebin_string)
+
+        # Get the pixel adjustment workspace
+        pixel_adjustment_file = wavelength_and_pixel_adjustment_state.adjustment_files[component].pixel_adjustment_file
+        idf_path = wavelength_and_pixel_adjustment_state.idf_path
+        pixel_adjustment_workspace = self._get_pixel_adjustment_workspace(pixel_adjustment_file, component, idf_path)
+
+        # Set the output
+        if wavelength_adjustment_workspace:
+            self.setProperty("OutputWorkspaceWavelengthAdjustment", wavelength_adjustment_workspace)
+        if pixel_adjustment_workspace:
+            self.setProperty("OutputWorkspacePixelAdjustment", pixel_adjustment_workspace)
+
+    def _get_wavelength_adjustment_workspace(self, wavelength_adjustment_file, transmission_workspace,
+                                             monitor_normalization_workspace, rebin_string):
+        """
+        This creates a workspace with wavelength adjustments, i.e. a correction per wavelength bin which is
+        the same for all pixels. It is essentially the product of several workspaces.
+        The participating workspaces are:
+        1. A workspace loaded from a calibration file
+        2. The workspace resulting from the monitor normalization
+        3. The workspace resulting from the transmission calculation (using SANSCalculateTransmission) if applicable
+
+        :param wavelength_adjustment_file: the file path to the wavelength adjustment file
+        :param transmission_workspace: the calculated transmission workspace (which can be None)
+        :param monitor_normalization_workspace: the monitor normalization workspace
+        :param rebin_string: the parameters for rebinning
+        :return: a general wavelength adjustment workspace
+        """
+        # 1. Get the wavelength correction workspace from the file
+        wavelength_adjustment_workspaces = []
+        if wavelength_adjustment_file:
+            wavelength_correction_workspace_from_file = \
+                self._load_wavelength_correction_file(wavelength_adjustment_file)
+            wavelength_adjustment_workspaces.append(wavelength_correction_workspace_from_file)
+
+        # 2. Normalization
+        wavelength_adjustment_workspaces.append(monitor_normalization_workspace)
+
+        # 3. Transmission Calculation
+        if transmission_workspace:
+            wavelength_adjustment_workspaces.append(transmission_workspace)
+
+        # Multiply all workspaces
+        wavelength_adjustment_workspace = None
+        for workspace in wavelength_adjustment_workspaces:
+            # First we need to change the binning such that it matches the binning of the main data workspace
+            rebin_name = "Rebin"
+            rebin_options = {"InputWorkspace": workspace,
+                             "Params": rebin_string,
+                             "OutputWorkspace": EMPTY_NAME}
+            rebin_alg = create_unmanaged_algorithm(rebin_name, **rebin_options)
+            rebin_alg.execute()
+            rebinned_workspace = rebin_alg.getProperty("OutputWorkspace").value
+            if wavelength_adjustment_workspace is None:
+                wavelength_adjustment_workspace = rebinned_workspace
+            else:
+                multiply_name = "Multiply"
+                multiply_options = {"LHSWorkspace": rebinned_workspace,
+                                    "RHSWorkspace": wavelength_adjustment_workspace,
+                                    "OutputWorkspace": EMPTY_NAME}
+                multiply_alg = create_unmanaged_algorithm(multiply_name, **multiply_options)
+                multiply_alg.execute()
+                wavelength_adjustment_workspace = multiply_alg.getProperty("OutputWorkspace").value
+        return wavelength_adjustment_workspace
+
+    def _load_wavelength_correction_file(self, file_name):
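+        """
+        Loads a wavelength correction file via LoadRKH and converts the result to histogram data
+        if required.
+
+        :param file_name: the path to the wavelength adjustment file (can be empty).
+        :return: the correction workspace, or None if no file name was provided.
+        """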
+        correction_workspace = None
+        if file_name:
+            load_name = "LoadRKH"
+            load_option = {"Filename": file_name,
+                           "OutputWorkspace": EMPTY_NAME,
+                           "FirstColumnValue": "Wavelength"}
+            load_alg = create_unmanaged_algorithm(load_name, **load_option)
+            load_alg.execute()
+            output_workspace = load_alg.getProperty("OutputWorkspace").value
+            # We require HistogramData and not PointData
+            if not output_workspace.isHistogramData():
+                convert_name = "ConvertToHistogram"
+                convert_options = {"InputWorkspace": output_workspace,
+                                   "OutputWorkspace": EMPTY_NAME}
+                convert_alg = create_unmanaged_algorithm(convert_name, **convert_options)
+                convert_alg.execute()
+                output_workspace = convert_alg.getProperty("OutputWorkspace").value
+            correction_workspace = output_workspace
+        return correction_workspace
+
+    def _get_pixel_adjustment_workspace(self, pixel_adjustment_file, component, idf_path):
+        """
+        This gets the pixel-by-pixel adjustment workspace.
+
+        :param pixel_adjustment_file: full file path to the pixel adjustment file
+        :param component: the component which is currently being investigated
+        :param idf_path: the idf path
+        :return: the pixel adjustment workspace
+        """
+        if pixel_adjustment_file:
+            load_name = "LoadRKH"
+            load_options = {"Filename": pixel_adjustment_file,
+                            "OutputWorkspace": EMPTY_NAME,
+                            "FirstColumnValue": "SpectrumNumber"}
+            load_alg = create_unmanaged_algorithm(load_name, **load_options)
+            load_alg.execute()
+            output_workspace = load_alg.getProperty("OutputWorkspace").value
+
+            # Add an instrument to the workspace
+            instrument_name = "LoadInstrument"
+            instrument_options = {"Workspace": output_workspace,
+                                  "Filename": idf_path,
+                                  "RewriteSpectraMap": False}
+            instrument_alg = create_unmanaged_algorithm(instrument_name, **instrument_options)
+            instrument_alg.execute()
+
+            # Crop to the required detector
+            crop_name = "SANSCrop"
+            crop_options = {"InputWorkspace": output_workspace,
+                            "OutputWorkspace": EMPTY_NAME,
+                            "Component": component}
+            crop_alg = create_unmanaged_algorithm(crop_name, **crop_options)
+            crop_alg.execute()
+            pixel_adjustment_workspace = crop_alg.getProperty("OutputWorkspace").value
+        else:
+            pixel_adjustment_workspace = None
+        return pixel_adjustment_workspace
+
+    def _get_rebin_string(self, wavelength_and_pixel_adjustment_state):
+        wavelength_low = wavelength_and_pixel_adjustment_state.wavelength_low
+        wavelength_high = wavelength_and_pixel_adjustment_state.wavelength_high
+        wavelength_step = wavelength_and_pixel_adjustment_state.wavelength_step
+        wavelength_step_type = -1.0 if wavelength_and_pixel_adjustment_state.wavelength_step_type \
+                                       is RangeStepType.Log else 1.0  # noqa
+
+        # Create a rebin string from the wavelength information
+        wavelength_step *= wavelength_step_type
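+        # Worked example (illustrative values, not from a real state): wavelength_low=2.0, wavelength_high=14.0,
+        # wavelength_step=0.5 and a Log step type yield the rebin string "2.0,-0.5,14.0"; the negative step
+        # width is how logarithmic binning is requested from Rebin.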
+        return str(wavelength_low) + "," + str(wavelength_step) + "," + str(wavelength_high)
+
+    def validateInputs(self):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        state_property_manager = self.getProperty("SANSState").value
+        try:
+            state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+            state.validate()
+        except ValueError as err:
+            errors.update({"SANSCreateWavelengthAndPixelAdjustment": str(err)})
+
+        # The transmission and the normalize to monitor workspace must have exactly one histogram present
+        transmission_workspace = self.getProperty("TransmissionWorkspace").value
+        normalize_to_monitor = self.getProperty("NormalizeToMonitorWorkspace").value
+        if transmission_workspace and transmission_workspace.getNumberHistograms() != 1:
+            errors.update({"TransmissionWorkspace": "The transmission workspace can have only one histogram."})
+        if normalize_to_monitor.getNumberHistograms() != 1:
+            errors.update({"NormalizeToMonitorWorkspace": "The monitor normalization workspace can have"
+                                                          " only one histogram."})
+        return errors
+
+# Register algorithm with Mantid
+AlgorithmFactory.subscribe(SANSCreateWavelengthAndPixelAdjustment)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCrop.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCrop.py
index dabcffda1a62b3f31b2fd42c34315c265b0e887e..080e8f59905ae0a7d48c2178cd2d9eafe6a5d6a4 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCrop.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSCrop.py
@@ -68,10 +68,11 @@ class SANSCrop(DataProcessorAlgorithm):
         else:
             component = DetectorType.LAB
 
-        # TODO: Make this nicer
         instrument = workspace.getInstrument()
         instrument_name = instrument.getName().strip()
 
+        # TODO: The detector bank selection below could be made nicer, but it is
+        #       currently not essential.
         if instrument_name == "SANS2D":
             component = "front-detector" if component is DetectorType.HAB else "rear-detector"
         elif instrument_name == "LOQ":
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSLoad.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSLoad.py
index 246fbe0b008f5addf1cd47883d6c5d5e41753217..f159bf65ee4c15445d4d19fc46d79f8f556790be 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSLoad.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSLoad.py
@@ -148,11 +148,11 @@ class SANSLoad(DataProcessorAlgorithm):
             progress_move.report("Finished moving the workspaces.")
 
         # Set output workspaces
-        for workspace_type, workspace in workspaces.items():
+        for workspace_type, workspace in list(workspaces.items()):
             self.set_output_for_workspaces(workspace_type, workspace)
 
         # Set the output monitor workspaces
-        for workspace_type, workspace in workspace_monitors.items():
+        for workspace_type, workspace in list(workspace_monitors.items()):
             self.set_output_for_monitor_workspaces(workspace_type, workspace)
 
     def validateInputs(self):
@@ -346,7 +346,7 @@ class SANSLoad(DataProcessorAlgorithm):
         move_alg = create_child_algorithm(self, move_name, **move_options)
 
         # The workspaces are stored in a dict: workspace_names (sample_scatter, etc) : ListOfWorkspaces
-        for key, workspace_list in workspaces.items():
+        for key, workspace_list in list(workspaces.items()):
             for workspace in workspace_list:
                 move_alg.setProperty("Workspace", workspace)
                 move_alg.execute()
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSMove.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSMove.py
index 51333bb5da91fdf19f4737461ebe0df5892d8d9b..e1975d3a0c7f09709241acce0398ccabeb35475c 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSMove.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSMove.py
@@ -40,7 +40,7 @@ def get_detector_for_component(move_info, component):
         selected_detector = detectors[DetectorType.to_string(DetectorType.LAB)]
     else:
         # Check if the component is part of the detector names
-        for _, detector in detectors.items():
+        for _, detector in list(detectors.items()):
             if detector.detector_name == component or detector.detector_name_short == component:
                 selected_detector = detector
     return selected_detector
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSNormalizeToMonitor.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSNormalizeToMonitor.py
new file mode 100644
index 0000000000000000000000000000000000000000..c061576b1c1e1817951296c14efdd03b4021c523
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSNormalizeToMonitor.py
@@ -0,0 +1,216 @@
+# pylint: disable=invalid-name
+
+""" SANSNormalizeToMonitor algorithm calculates the normalization to the monitor."""
+
+from __future__ import (absolute_import, division, print_function)
+from mantid.kernel import (Direction, FloatBoundedValidator, PropertyManagerProperty)
+from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_unmanaged_algorithm
+from sans.common.enums import RebinType, RangeStepType
+from sans.state.state_base import create_deserialized_sans_state_from_property_manager
+
+
+class SANSNormalizeToMonitor(DataProcessorAlgorithm):
+    def category(self):
+        return 'SANS\\Adjust'
+
+    def summary(self):
+        return 'Calculates a monitor normalization workspace for a SANS reduction.'
+
+    def PyInit(self):
+        # State
+        self.declareProperty(PropertyManagerProperty('SANSState'),
+                             doc='A property manager which fulfills the SANSState contract.')
+
+        # Input workspace in TOF
+        self.declareProperty(MatrixWorkspaceProperty("InputWorkspace", '',
+                                                     optional=PropertyMode.Mandatory, direction=Direction.Input),
+                             doc='The monitor workspace in time-of-flight units.')
+
+        # A scale factor which could come from event workspace slicing. If the actual data workspace was sliced,
+        # then one needs to scale the monitor measurement proportionally. This input is intended for this matter
+        self.declareProperty('ScaleFactor', defaultValue=1.0, direction=Direction.Input,
+                             validator=FloatBoundedValidator(0.0),
+                             doc='Optional scale factor for the input workspace.')
+
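+        # For example (hypothetical value): if event slicing via SANSSliceEvent kept a time window containing
+        # 30% of the proton charge, the caller would pass ScaleFactor=0.3 so that the monitor counts shrink in
+        # proportion to the sliced detector data.
+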
+        # Output workspace
+        self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", '', direction=Direction.Output),
+                             doc='A monitor normalization workspace in units of wavelength.')
+
+    def PyExec(self):
+        # Read the state
+        state_property_manager = self.getProperty("SANSState").value
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+        normalize_to_monitor_state = state.adjustment.normalize_to_monitor
+
+        # 1. Extract the spectrum of the incident monitor
+        incident_monitor_spectrum_number = normalize_to_monitor_state.incident_monitor
+        workspace = self._extract_monitor(incident_monitor_spectrum_number)
+
+        # 2. Multiply the workspace by the specified scaling factor.
+        scale_factor = self.getProperty("ScaleFactor").value
+        if scale_factor != 1.0:
+            workspace = self._scale(workspace, scale_factor)
+
+        # 3. Remove the prompt peak (if it exists)
+        workspace = self._perform_prompt_peak_correction(workspace, normalize_to_monitor_state)
+
+        # 4. Perform a flat background correction
+        workspace = self._perform_flat_background_correction(workspace, normalize_to_monitor_state)
+
+        # 5. Convert to wavelength with the specified bin settings.
+        workspace = self._convert_to_wavelength(workspace, normalize_to_monitor_state)
+
+        self.setProperty("OutputWorkspace", workspace)
+
+    def _scale(self, workspace, factor):
+        """
+        The incident monitor is scaled by a factor.
+
+        When we work with sliced event data, the monitor data set needs to be scaled accordingly. The scale
+        factor is the slice factor which is reported by SANSSliceEvent. Without this scaling we would be comparing
+        the full monitor data with only part of the detector data.
+        :param workspace: the workspace to scale.
+        :param factor: the scaling factor.
+        :return: a scaled workspace.
+        """
+        scale_name = "Scale"
+        scale_options = {"InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME,
+                         "Factor": factor,
+                         "Operation": "Multiply"}
+        scale_alg = create_unmanaged_algorithm(scale_name, **scale_options)
+        scale_alg.execute()
+        return scale_alg.getProperty("OutputWorkspace").value
+
+    def _extract_monitor(self, spectrum_number):
+        """
+        This extracts a single spectrum from the input workspace.
+
+        We are only interested in the incident monitor here.
+        :param spectrum_number: the spectrum number of the incident beam monitor.
+        :return: a workspace which only contains the incident beam spectrum.
+        """
+        workspace = self.getProperty("InputWorkspace").value
+
+        workspace_index = workspace.getIndexFromSpectrumNumber(spectrum_number)
+        extract_name = "ExtractSingleSpectrum"
+        extract_options = {"InputWorkspace": workspace,
+                           "OutputWorkspace": EMPTY_NAME,
+                           "WorkspaceIndex": workspace_index}
+        extract_alg = create_unmanaged_algorithm(extract_name, **extract_options)
+        extract_alg.execute()
+        return extract_alg.getProperty("OutputWorkspace").value
+
+    def _perform_prompt_peak_correction(self, workspace, normalize_to_monitor_state):
+        """
+        Performs a prompt peak correction.
+
+        A prompt peak can occur when very fast neutrons pass through the measurement, which can happen when
+        working with two time regimes. Prompt peaks are prominent peaks which stand out from the usual data.
+        They occur frequently on LOQ, but are also possible on other instruments. We deal with them by removing
+        the affected data and interpolating between the edge data points. If the user does not specify a start
+        and stop time for the prompt peak, then this correction is not performed.
+        :param workspace: the workspace which is to be corrected.
+        :param normalize_to_monitor_state: a SANSStateNormalizeToMonitor object.
+        :return: the corrected workspace.
+        """
+        prompt_peak_correction_start = normalize_to_monitor_state.prompt_peak_correction_min
+        prompt_peak_correction_stop = normalize_to_monitor_state.prompt_peak_correction_max
+        prompt_peak_correction_enabled = normalize_to_monitor_state.prompt_peak_correction_enabled
+        # We perform only a prompt peak correction if the start and stop values of the bins we want to remove,
+        # were explicitly set. Some instruments require it, others don't.
+        if prompt_peak_correction_enabled and prompt_peak_correction_start is not None and \
+                prompt_peak_correction_stop is not None:
+            remove_name = "RemoveBins"
+            remove_options = {"InputWorkspace": workspace,
+                              "OutputWorkspace": EMPTY_NAME,
+                              "XMin": prompt_peak_correction_start,
+                              "XMax": prompt_peak_correction_stop,
+                              "Interpolation": "Linear"}
+            remove_alg = create_unmanaged_algorithm(remove_name, **remove_options)
+            remove_alg.execute()
+            workspace = remove_alg.getProperty("OutputWorkspace").value
+        return workspace
+
+    def _perform_flat_background_correction(self, workspace, normalize_to_monitor_state):
+        """
+        Removes an offset from the monitor data.
+
+        A certain region of the data set is selected which corresponds to only background data. This data is averaged
+        which results in a mean background value which is subtracted from the data.
+        :param workspace: the workspace which is to be corrected.
+        :param normalize_to_monitor_state: a SANSStateNormalizeToMonitor object.
+        :return: the corrected workspace.
+        """
+
+        # Get the time range for the background calculation. First check if there is an entry for the
+        # incident monitor.
+        incident_monitor_spectrum_number = normalize_to_monitor_state.incident_monitor
+        incident_monitor_spectrum_as_string = str(incident_monitor_spectrum_number)
+        background_tof_monitor_start = normalize_to_monitor_state.background_TOF_monitor_start
+        background_tof_monitor_stop = normalize_to_monitor_state.background_TOF_monitor_stop
+
+        if incident_monitor_spectrum_as_string in list(background_tof_monitor_start.keys()) and \
+           incident_monitor_spectrum_as_string in list(background_tof_monitor_stop.keys()):
+            start_tof = background_tof_monitor_start[incident_monitor_spectrum_as_string]
+            stop_tof = background_tof_monitor_stop[incident_monitor_spectrum_as_string]
+        else:
+            start_tof = normalize_to_monitor_state.background_TOF_general_start
+            stop_tof = normalize_to_monitor_state.background_TOF_general_stop
+
+        # Only if a TOF range was set, do we have to perform a correction
+        if start_tof and stop_tof:
+            flat_name = "CalculateFlatBackground"
+            flat_options = {"InputWorkspace": workspace,
+                            "OutputWorkspace": EMPTY_NAME,
+                            "StartX": start_tof,
+                            "EndX": stop_tof,
+                            "Mode": "Mean"}
+            flat_alg = create_unmanaged_algorithm(flat_name, **flat_options)
+            flat_alg.execute()
+            workspace = flat_alg.getProperty("OutputWorkspace").value
+        return workspace
+
+    def _convert_to_wavelength(self, workspace, normalize_to_monitor_state):
+        """
+        Converts the workspace from time-of-flight units to wavelength units.
+
+        :param workspace: a time-of-flight workspace.
+        :param normalize_to_monitor_state: a SANSStateNormalizeToMonitor object.
+        :return: a wavelength workspace.
+        """
+        wavelength_low = normalize_to_monitor_state.wavelength_low
+        wavelength_high = normalize_to_monitor_state.wavelength_high
+        wavelength_step = normalize_to_monitor_state.wavelength_step
+        wavelength_step_type = normalize_to_monitor_state.wavelength_step_type
+        wavelength_rebin_mode = normalize_to_monitor_state.rebin_type
+        convert_name = "SANSConvertToWavelengthAndRebin"
+        convert_options = {"InputWorkspace": workspace,
+                           "WavelengthLow": wavelength_low,
+                           "WavelengthHigh": wavelength_high,
+                           "WavelengthStep": wavelength_step,
+                           "WavelengthStepType": RangeStepType.to_string(wavelength_step_type),
+                           "RebinMode": RebinType.to_string(wavelength_rebin_mode)}
+
+        convert_alg = create_unmanaged_algorithm(convert_name, **convert_options)
+        convert_alg.setPropertyValue("OutputWorkspace", EMPTY_NAME)
+        convert_alg.setProperty("OutputWorkspace", workspace)
+        convert_alg.execute()
+        return convert_alg.getProperty("OutputWorkspace").value
+
+    def validateInputs(self):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        state_property_manager = self.getProperty("SANSState").value
+        try:
+            state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+            state.property_manager = state_property_manager
+            state.validate()
+        except ValueError as err:
+            errors.update({"SANSNormalizeToMonitor": str(err)})
+        return errors
+
+
+# Register algorithm with Mantid
+AlgorithmFactory.subscribe(SANSNormalizeToMonitor)
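+
+# A minimal usage sketch (commented out; the state object and monitor workspace names below are illustrative
+# assumptions, not part of this algorithm):
+#
+#   from mantid.api import AlgorithmManager
+#   alg = AlgorithmManager.createUnmanaged("SANSNormalizeToMonitor")
+#   alg.initialize()
+#   alg.setChild(True)
+#   alg.setProperty("SANSState", state_property_manager)
+#   alg.setProperty("InputWorkspace", monitor_workspace)
+#   alg.setProperty("ScaleFactor", 1.0)
+#   alg.setPropertyValue("OutputWorkspace", "normalized_monitor")
+#   alg.execute()
+#   normalization_workspace = alg.getProperty("OutputWorkspace").value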
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSReductionCore.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSReductionCore.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc17388d70108e4e009abe6a2d2ce18a87a4fe1f
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSReductionCore.py
@@ -0,0 +1,416 @@
+# pylint: disable=invalid-name
+
+""" SANSReductionCore algorithm runs the sequence of reduction steps which are necessary to reduce a data set."""
+
+from __future__ import (absolute_import, division, print_function)
+from mantid.kernel import (Direction, PropertyManagerProperty, StringListValidator)
+from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode,
+                        IEventWorkspace, Progress)
+
+from sans.state.state_base import create_deserialized_sans_state_from_property_manager
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import (create_child_algorithm, append_to_sans_file_tag)
+from sans.common.enums import (DetectorType, DataType)
+
+
+class SANSReductionCore(DataProcessorAlgorithm):
+    def category(self):
+        return 'SANS\\Reduction'
+
+    def summary(self):
+        return 'Runs the core reduction elements.'
+
+    def PyInit(self):
+        # ----------
+        # INPUT
+        # ----------
+        self.declareProperty(PropertyManagerProperty('SANSState'),
+                             doc='A property manager which fulfills the SANSState contract.')
+
+        # WORKSPACES
+        # Scatter Workspaces
+        self.declareProperty(MatrixWorkspaceProperty('ScatterWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The scatter workspace. This workspace does not contain monitors.')
+        self.declareProperty(MatrixWorkspaceProperty('ScatterMonitorWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The scatter monitor workspace. This workspace only contains monitors.')
+
+        # Transmission Workspace
+        self.declareProperty(MatrixWorkspaceProperty('TransmissionWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The transmission workspace.')
+
+        # Direct Workspace
+        self.declareProperty(MatrixWorkspaceProperty('DirectWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The direct workspace.')
+
+        self.setPropertyGroup("ScatterWorkspace", 'Data')
+        self.setPropertyGroup("ScatterMonitorWorkspace", 'Data')
+        self.setPropertyGroup("TransmissionWorkspace", 'Data')
+        self.setPropertyGroup("DirectWorkspace", 'Data')
+
+        # The component
+        allowed_detectors = StringListValidator([DetectorType.to_string(DetectorType.LAB),
+                                                 DetectorType.to_string(DetectorType.HAB)])
+        self.declareProperty("Component", DetectorType.to_string(DetectorType.LAB),
+                             validator=allowed_detectors, direction=Direction.Input,
+                             doc="The component of the instrument which is to be reduced.")
+
+        # The data type
+        allowed_data = StringListValidator([DataType.to_string(DataType.Sample),
+                                            DataType.to_string(DataType.Can)])
+        self.declareProperty("DataType", DataType.to_string(DataType.Sample),
+                             validator=allowed_data, direction=Direction.Input,
+                             doc="The component of the instrument which is to be reduced.")
+
+        # ----------
+        # OUTPUT
+        # ----------
+        self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", '', direction=Direction.Output),
+                             doc='The output workspace.')
+
+        self.declareProperty(MatrixWorkspaceProperty('SumOfCounts', '', optional=PropertyMode.Optional,
+                                                     direction=Direction.Output),
+                             doc='The sum of the counts of the output workspace.')
+
+        self.declareProperty(MatrixWorkspaceProperty('SumOfNormFactors', '', optional=PropertyMode.Optional,
+                                                     direction=Direction.Output),
+                             doc='The sum of the normalization factors of the output workspace.')
+
+    def PyExec(self):
+        # Get the input
+        state = self._get_state()
+        state_serialized = state.property_manager
+        component_as_string = self.getProperty("Component").value
+        progress = self._get_progress()
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 1. Crop workspace by detector name
+        #    This will create a reduced copy of the original workspace with only those spectra which are relevant
+        #    for this particular reduction.
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Cropping ...")
+        workspace = self._get_cropped_workspace(component_as_string)
+
+        # --------------------------------------------------------------------------------------------
+        # 2. Perform dark run subtraction
+        #    This will subtract a dark background from the scatter workspace. Note that dark background subtraction
+        #    will also affect the transmission calculation later on.
+        # --------------------------------------------------------------------------------------------------------------
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 3. Create event slice
+        #    If we are dealing with an event workspace as input, this will cut out a time-based (user-defined) slice.
+        #    In case of a histogram workspace, nothing happens.
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Event slicing ...")
+        data_type_as_string = self.getProperty("DataType").value
+        monitor_workspace = self._get_monitor_workspace()
+        workspace, monitor_workspace, slice_event_factor = self._slice(state_serialized, workspace, monitor_workspace,
+                                                                       data_type_as_string)
+
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN
+        # IMPORTANT: This section of the code should only be temporary. It allows us to convert to histogram
+        # early on and hence compare the new reduction results with the output of the old reduction chain.
+        # Once the new reduction chain is established, we should remove the compatibility feature.
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        compatibility = state.compatibility
+        is_event_workspace = isinstance(workspace, IEventWorkspace)
+        if compatibility.use_compatibility_mode and is_event_workspace:
+            # We convert the workspace here to a histogram workspace, since we cannot otherwise
+            # compare the results between the old and the new reduction workspace in a meaningful manner.
+            # The old one is histogram and the new one is event.
+            # Rebin to monitor workspace
+            if compatibility.time_rebin_string:
+                rebin_name = "Rebin"
+                rebin_option = {"InputWorkspace": workspace,
+                                "Params": compatibility.time_rebin_string,
+                                "OutputWorkspace": EMPTY_NAME,
+                                "PreserveEvents": False}
+                rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
+                rebin_alg.execute()
+                workspace = rebin_alg.getProperty("OutputWorkspace").value
+            else:
+                rebin_name = "RebinToWorkspace"
+                rebin_option = {"WorkspaceToRebin": workspace,
+                                "WorkspaceToMatch": monitor_workspace,
+                                "OutputWorkspace": EMPTY_NAME,
+                                "PreserveEvents": False}
+                rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
+                rebin_alg.execute()
+                workspace = rebin_alg.getProperty("OutputWorkspace").value
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+        # ------------------------------------------------------------
+        # 4. Move the workspace into the correct position
+        #    The detectors in the workspaces are set such that the beam centre is at (0,0). The position is
+        #    a user-specified value which can be obtained with the help of the beam centre finder.
+        # ------------------------------------------------------------
+        progress.report("Moving ...")
+        workspace = self._move(state_serialized, workspace, component_as_string)
+        monitor_workspace = self._move(state_serialized, monitor_workspace, component_as_string)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 5. Apply masking (pixel masking and time masking)
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Masking ...")
+        workspace = self._mask(state_serialized, workspace, component_as_string)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 6. Convert to Wavelength
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Converting to wavelength ...")
+        workspace = self._convert_to_wavelength(state_serialized, workspace)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 7. Multiply by volume and absolute scale
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Multiplying by volume and absolute scale ...")
+        workspace = self._scale(state_serialized, workspace)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 8. Create adjustment workspaces, those are
+        #     1. pixel-based adjustments
+        #     2. wavelength-based adjustments
+        #     3. pixel-and-wavelength-based adjustments
+        # Note that steps 4 to 7 could run in parallel if we don't use the wide angle correction. If we do, then
+        # the creation of the adjustment workspaces requires the sample workspace itself, so it has to run
+        # sequentially. We could consider having both a serial and a parallel strategy here, depending on the wide
+        # angle correction settings. On the other hand, it is not clear that this would be an advantage given the GIL.
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Creating adjustment workspaces ...")
+        wavelength_adjustment_workspace, pixel_adjustment_workspace, wavelength_and_pixel_adjustment_workspace =\
+            self._adjustment(state_serialized, workspace, monitor_workspace, component_as_string, data_type_as_string)
+
+        # ------------------------------------------------------------
+        # 9. Convert event workspaces to histogram workspaces
+        # ------------------------------------------------------------
+        progress.report("Converting to histogram mode ...")
+        workspace = self._convert_to_histogram(workspace)
+
+        # ------------------------------------------------------------
+        # 10. Convert to Q
+        # -----------------------------------------------------------
+        progress.report("Converting to q ...")
+        workspace, sum_of_counts, sum_of_norms = self._convert_to_q(state_serialized,
+                                                                    workspace,
+                                                                    wavelength_adjustment_workspace,
+                                                                    pixel_adjustment_workspace,
+                                                                    wavelength_and_pixel_adjustment_workspace)
+        progress.report("Completed SANSReductionCore ...")
+
+        # ------------------------------------------------------------
+        # Populate the output
+        # ------------------------------------------------------------
+        self.setProperty("OutputWorkspace", workspace)
+
+        # ------------------------------------------------------------
+        # Diagnostic output
+        # ------------------------------------------------------------
+        if sum_of_counts:
+            self.setProperty("SumOfCounts", sum_of_counts)
+        if sum_of_norms:
+            self.setProperty("SumOfNormFactors", sum_of_norms)
+
+        # TODO: Publish temporary workspaces if required
+        # This includes partial workspaces of Q1D and unfitted transmission data
+
+    def _get_cropped_workspace(self, component):
+        scatter_workspace = self.getProperty("ScatterWorkspace").value
+        crop_name = "SANSCrop"
+        crop_options = {"InputWorkspace": scatter_workspace,
+                        "OutputWorkspace": EMPTY_NAME,
+                        "Component": component}
+        crop_alg = create_child_algorithm(self, crop_name, **crop_options)
+        crop_alg.execute()
+        return crop_alg.getProperty("OutputWorkspace").value
+
+    def _slice(self, state_serialized, workspace, monitor_workspace, data_type_as_string):
+        slice_name = "SANSSliceEvent"
+        slice_options = {"SANSState": state_serialized,
+                         "InputWorkspace": workspace,
+                         "InputWorkspaceMonitor": monitor_workspace,
+                         "OutputWorkspace": EMPTY_NAME,
+                         "OutputWorkspaceMonitor": "dummy2",
+                         "DataType": data_type_as_string}
+        slice_alg = create_child_algorithm(self, slice_name, **slice_options)
+        slice_alg.execute()
+
+        workspace = slice_alg.getProperty("OutputWorkspace").value
+        monitor_workspace = slice_alg.getProperty("OutputWorkspaceMonitor").value
+        slice_event_factor = slice_alg.getProperty("SliceEventFactor").value
+        return workspace, monitor_workspace, slice_event_factor
+
+    def _move(self, state_serialized, workspace, component, is_transmission=False):
+        # First we set the workspace to zero, since it might have been moved around by the user in the ADS
+        # Second we use the initial move to bring the workspace into the correct position
+        move_name = "SANSMove"
+        move_options = {"SANSState": state_serialized,
+                        "Workspace": workspace,
+                        "MoveType": "SetToZero",
+                        "Component": ""}
+        move_alg = create_child_algorithm(self, move_name, **move_options)
+        move_alg.execute()
+        workspace = move_alg.getProperty("Workspace").value
+
+        # Do the initial move
+        move_alg.setProperty("MoveType", "InitialMove")
+        move_alg.setProperty("Component", component)
+        move_alg.setProperty("Workspace", workspace)
+        move_alg.setProperty("IsTransmissionWorkspace", is_transmission)
+        move_alg.execute()
+        return move_alg.getProperty("Workspace").value
+
+    def _mask(self, state_serialized, workspace, component):
+        mask_name = "SANSMaskWorkspace"
+        mask_options = {"SANSState": state_serialized,
+                        "Workspace": workspace,
+                        "Component": component}
+        mask_alg = create_child_algorithm(self, mask_name, **mask_options)
+        mask_alg.execute()
+        return mask_alg.getProperty("Workspace").value
+
+    def _convert_to_wavelength(self, state_serialized, workspace):
+        wavelength_name = "SANSConvertToWavelength"
+        wavelength_options = {"SANSState": state_serialized,
+                              "InputWorkspace": workspace}
+        wavelength_alg = create_child_algorithm(self, wavelength_name, **wavelength_options)
+        wavelength_alg.setPropertyValue("OutputWorkspace", EMPTY_NAME)
+        wavelength_alg.setProperty("OutputWorkspace", workspace)
+        wavelength_alg.execute()
+        return wavelength_alg.getProperty("OutputWorkspace").value
+
+    def _scale(self, state_serialized, workspace):
+        scale_name = "SANSScale"
+        scale_options = {"SANSState": state_serialized,
+                         "InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME}
+        scale_alg = create_child_algorithm(self, scale_name, **scale_options)
+        scale_alg.execute()
+        return scale_alg.getProperty("OutputWorkspace").value
+
+    def _adjustment(self, state_serialized, workspace, monitor_workspace, component_as_string, data_type):
+        transmission_workspace = self._get_transmission_workspace()
+        direct_workspace = self._get_direct_workspace()
+
+        adjustment_name = "SANSCreateAdjustmentWorkspaces"
+        adjustment_options = {"SANSState": state_serialized,
+                              "Component": component_as_string,
+                              "DataType": data_type,
+                              "MonitorWorkspace": monitor_workspace,
+                              "SampleData": workspace,
+                              "OutputWorkspaceWavelengthAdjustment": EMPTY_NAME,
+                              "OutputWorkspacePixelAdjustment": EMPTY_NAME,
+                              "OutputWorkspaceWavelengthAndPixelAdjustment": EMPTY_NAME}
+        if transmission_workspace:
+            transmission_workspace = self._move(state_serialized, transmission_workspace, component_as_string,
+                                                is_transmission=True)
+            adjustment_options.update({"TransmissionWorkspace": transmission_workspace})
+
+        if direct_workspace:
+            direct_workspace = self._move(state_serialized, direct_workspace, component_as_string, is_transmission=True)
+            adjustment_options.update({"DirectWorkspace": direct_workspace})
+
+        adjustment_alg = create_child_algorithm(self, adjustment_name, **adjustment_options)
+        adjustment_alg.execute()
+
+        wavelength_adjustment = adjustment_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
+        pixel_adjustment = adjustment_alg.getProperty("OutputWorkspacePixelAdjustment").value
+        wavelength_and_pixel_adjustment = adjustment_alg.getProperty(
+                                           "OutputWorkspaceWavelengthAndPixelAdjustment").value
+        return wavelength_adjustment, pixel_adjustment, wavelength_and_pixel_adjustment
+
+    def _convert_to_histogram(self, workspace):
+        if isinstance(workspace, IEventWorkspace):
+            convert_name = "RebinToWorkspace"
+            convert_options = {"WorkspaceToRebin": workspace,
+                               "WorkspaceToMatch": workspace,
+                               "OutputWorkspace": "OutputWorkspace",
+                               "PreserveEvents": False}
+            convert_alg = create_child_algorithm(self, convert_name, **convert_options)
+            convert_alg.execute()
+            workspace = convert_alg.getProperty("OutputWorkspace").value
+            append_to_sans_file_tag(workspace, "_histogram")
+
+        return workspace
+
+    def _convert_to_q(self, state_serialized, workspace, wavelength_adjustment_workspace, pixel_adjustment_workspace,
+                      wavelength_and_pixel_adjustment_workspace):
+        """
+        A conversion to momentum transfer is performed in this step.
+
+        The conversion can either be to the modulus of Q, in which case the output is a 1D workspace, or it can
+        be a 2D reduction where the y axis is Qy, i.e. a numeric axis.
+        @param state_serialized: a serialized SANSState object
+        @param workspace: the workspace to convert to momentum transfer.
+        @param wavelength_adjustment_workspace: the wavelength adjustment workspace.
+        @param pixel_adjustment_workspace: the pixel adjustment workspace.
+        @param wavelength_and_pixel_adjustment_workspace: the wavelength and pixel adjustment workspace.
+        @return: a reduced workspace
+        """
+        convert_name = "SANSConvertToQ"
+        convert_options = {"InputWorkspace": workspace,
+                           "OutputWorkspace": EMPTY_NAME,
+                           "SANSState": state_serialized,
+                           "OutputParts": True}
+        if wavelength_adjustment_workspace:
+            convert_options.update({"InputWorkspaceWavelengthAdjustment": wavelength_adjustment_workspace})
+        if pixel_adjustment_workspace:
+            convert_options.update({"InputWorkspacePixelAdjustment": pixel_adjustment_workspace})
+        if wavelength_and_pixel_adjustment_workspace:
+            convert_options.update({"InputWorkspaceWavelengthAndPixelAdjustment":
+                                    wavelength_and_pixel_adjustment_workspace})
+        convert_alg = create_child_algorithm(self, convert_name, **convert_options)
+        convert_alg.execute()
+        data_workspace = convert_alg.getProperty("OutputWorkspace").value
+        sum_of_counts = convert_alg.getProperty("SumOfCounts").value
+        sum_of_norms = convert_alg.getProperty("SumOfNormFactors").value
+        return data_workspace, sum_of_counts, sum_of_norms
+
+    def validateInputs(self):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        try:
+            state = self._get_state()
+            state.validate()
+        except ValueError as err:
+            errors.update({"SANSSingleReduction": str(err)})
+        return errors
+
+    def _get_state(self):
+        state_property_manager = self.getProperty("SANSState").value
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+        state.property_manager = state_property_manager
+        return state
+
+    def _get_transmission_workspace(self):
+        transmission_workspace = self.getProperty("TransmissionWorkspace").value
+        return self._get_cloned_workspace(transmission_workspace) if transmission_workspace else None
+
+    def _get_direct_workspace(self):
+        direct_workspace = self.getProperty("DirectWorkspace").value
+        return self._get_cloned_workspace(direct_workspace) if direct_workspace else None
+
+    def _get_monitor_workspace(self):
+        monitor_workspace = self.getProperty("ScatterMonitorWorkspace").value
+        return self._get_cloned_workspace(monitor_workspace)
+
+    def _get_cloned_workspace(self, workspace):
+        clone_name = "CloneWorkspace"
+        clone_options = {"InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME}
+        clone_alg = create_child_algorithm(self, clone_name, **clone_options)
+        clone_alg.execute()
+        return clone_alg.getProperty("OutputWorkspace").value
+
+    def _get_progress(self):
+        return Progress(self, start=0.0, end=1.0, nreports=10)
+
+
+# Register algorithm with Mantid
+AlgorithmFactory.subscribe(SANSReductionCore)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSSingleReduction.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSSingleReduction.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b1296ca49ee857e2bd9c7aceb15cb31c2513096
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSSingleReduction.py
@@ -0,0 +1,383 @@
+# pylint: disable=invalid-name
+
+""" SANSSingleReduction algorithm performs a single reduction."""
+
+from __future__ import (absolute_import, division, print_function)
+from mantid.kernel import (Direction, PropertyManagerProperty, Property)
+from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode, Progress)
+
+from sans.state.state_base import create_deserialized_sans_state_from_property_manager
+from sans.common.enums import (ReductionMode, DataType, ISISReductionMode)
+from sans.common.general_functions import (create_child_algorithm, does_can_workspace_exist_on_ads)
+from sans.algorithm_detail.single_execution import (run_core_reduction, get_final_output_workspaces,
+                                                    get_merge_bundle_for_merge_request, run_optimized_for_can)
+from sans.algorithm_detail.bundles import ReductionSettingBundle
+
+
+class SANSSingleReduction(DataProcessorAlgorithm):
+    def category(self):
+        return 'SANS\\Reduction'
+
+    def summary(self):
+        return 'Performs a single reduction of SANS data.'
+
+    def PyInit(self):
+        # ----------
+        # INPUT
+        # ----------
+        self.declareProperty(PropertyManagerProperty('SANSState'),
+                             doc='A property manager which fulfills the SANSState contract.')
+
+        self.declareProperty("UseOptimizations", True, direction=Direction.Input,
+                             doc="When enabled the ADS is being searched for already loaded and reduced workspaces. "
+                                 "Depending on your concrete reduction, this could provide a significant"
+                                 " performance boost")
+
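+        # For example, when several sample runs share the same can run, the reduced can produced for the first
+        # reduction can be picked up from the ADS for the subsequent ones instead of being re-reduced.
+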
+        # Sample Scatter Workspaces
+        self.declareProperty(MatrixWorkspaceProperty('SampleScatterWorkspace', '',
+                                                     optional=PropertyMode.Mandatory, direction=Direction.Input),
+                             doc='The sample scatter workspace. This workspace does not contain monitors.')
+        self.declareProperty(MatrixWorkspaceProperty('SampleScatterMonitorWorkspace', '',
+                                                     optional=PropertyMode.Mandatory, direction=Direction.Input),
+                             doc='The sample scatter monitor workspace. This workspace only contains monitors.')
+
+        # Sample Transmission Workspace
+        self.declareProperty(MatrixWorkspaceProperty('SampleTransmissionWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The sample transmission workspace.')
+
+        # Sample Direct Workspace
+        self.declareProperty(MatrixWorkspaceProperty('SampleDirectWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The sample scatter direct workspace.')
+
+        self.setPropertyGroup("SampleScatterWorkspace", 'Sample')
+        self.setPropertyGroup("SampleScatterMonitorWorkspace", 'Sample')
+        self.setPropertyGroup("SampleTransmissionWorkspace", 'Sample')
+        self.setPropertyGroup("SampleDirectWorkspace", 'Sample')
+
+        # Can Scatter Workspaces
+        self.declareProperty(MatrixWorkspaceProperty('CanScatterWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The can scatter workspace. This workspace does not contain monitors.')
+        self.declareProperty(MatrixWorkspaceProperty('CanScatterMonitorWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The can scatter monitor workspace. This workspace only contains monitors.')
+
+        # Can Transmission Workspace
+        self.declareProperty(MatrixWorkspaceProperty('CanTransmissionWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The can transmission workspace.')
+
+        # Can Direct Workspace
+        self.declareProperty(MatrixWorkspaceProperty('CanDirectWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The can direct workspace.')
+
+        self.setPropertyGroup("CanScatterWorkspace", 'Can')
+        self.setPropertyGroup("CanScatterMonitorWorkspace", 'Can')
+        self.setPropertyGroup("CanTransmissionWorkspace", 'Can')
+        self.setPropertyGroup("CanDirectWorkspace", 'Can')
+
+        # ----------
+        # OUTPUT
+        # ----------
+        self.declareProperty('OutScaleFactor', defaultValue=Property.EMPTY_DBL, direction=Direction.Output,
+                             doc='Applied scale factor.')
+
+        self.declareProperty('OutShiftFactor', defaultValue=Property.EMPTY_DBL, direction=Direction.Output,
+                             doc='Applied shift factor.')
+
+        # This breaks our flexibility with the reduction mode. We need to check if we can populate this based on
+        # the available reduction modes for the state input. TODO: check if this is possible
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceLAB', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The output workspace for the low-angle bank.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceHAB', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The output workspace for the high-angle bank.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceMerged', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The output workspace for the merged reduction.')
+        self.setPropertyGroup("OutScaleFactor", 'Output')
+        self.setPropertyGroup("OutShiftFactor", 'Output')
+        self.setPropertyGroup("OutputWorkspaceLAB", 'Output')
+        self.setPropertyGroup("OutputWorkspaceHAB", 'Output')
+        self.setPropertyGroup("OutputWorkspaceMerged", 'Output')
+
+        # CAN output
+        # We want to output the can workspaces since they can be persisted when optimizations are enabled.
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceLABCan', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can output workspace for the low-angle bank, provided there is one.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceLABCanCount', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can count output workspace for the low-angle bank, provided there is one.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceLABCanNorm', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can norm output workspace for the low-angle bank, provided there is one.')
+
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceHABCan', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can output workspace for the high-angle bank, provided there is one.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceHABCanCount', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can count output workspace for the high-angle bank, provided there is one.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceHABCanNorm', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can norm output workspace for the high-angle bank, provided there is one.')
+        self.setPropertyGroup("OutputWorkspaceLABCan", 'Can Output')
+        self.setPropertyGroup("OutputWorkspaceLABCanCount", 'Can Output')
+        self.setPropertyGroup("OutputWorkspaceLABCanNorm", 'Can Output')
+        self.setPropertyGroup("OutputWorkspaceHABCan", 'Can Output')
+        self.setPropertyGroup("OutputWorkspaceHABCanCount", 'Can Output')
+        self.setPropertyGroup("OutputWorkspaceHABCanNorm", 'Can Output')
+
+    def PyExec(self):
+        # Get state
+        state = self._get_state()
+
+        # Get reduction mode
+        overall_reduction_mode = self._get_reduction_mode(state)
+
+        # Decide which core reductions to run, i.e. HAB, LAB, ALL or MERGED. In the case of ALL and MERGED,
+        # the required simple reduction modes need to be run. Normally these are HAB and LAB, although future
+        # implementations might have more detectors (or different types).
+        reduction_setting_bundles = self._get_reduction_setting_bundles(state, overall_reduction_mode)
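+        # For instance, a merged reduction is typically expanded into separate LAB and HAB bundles for the sample
+        # (and for the can, if can data was provided); the exact split comes from the state's merge strategy.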
+
+        # Run core reductions
+        use_optimizations = self.getProperty("UseOptimizations").value
+
+        # Create the reduction core algorithm
+        reduction_name = "SANSReductionCore"
+        reduction_options = {}
+        reduction_alg = create_child_algorithm(self, reduction_name, **reduction_options)
+
+        # Set up progress
+        progress = self._get_progress(len(reduction_setting_bundles), overall_reduction_mode)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Reduction
+        # --------------------------------------------------------------------------------------------------------------
+        output_bundles = []
+        output_parts_bundles = []
+
+        for reduction_setting_bundle in reduction_setting_bundles:
+            progress.report("Running a single reduction ...")
+            # We want to make use of optimizations here. If a can workspace has already been reduced with the same can
+            # settings and is stored in the ADS, then we should use it (provided the user has optimizations enabled).
+            if use_optimizations and reduction_setting_bundle.data_type is DataType.Can:
+                output_bundle, output_parts_bundle = run_optimized_for_can(reduction_alg,
+                                                                           reduction_setting_bundle)
+            else:
+                output_bundle, output_parts_bundle = run_core_reduction(reduction_alg,
+                                                                        reduction_setting_bundle)
+            output_bundles.append(output_bundle)
+            output_parts_bundles.append(output_parts_bundle)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Deal with merging
+        # --------------------------------------------------------------------------------------------------------------
+        reduction_mode_vs_output_workspaces = {}
+        # Merge if required with stitching etc.
+        if overall_reduction_mode is ReductionMode.Merged:
+            progress.report("Merging reductions ...")
+            merge_bundle = get_merge_bundle_for_merge_request(output_parts_bundles, self)
+            self.set_shift_and_scale_output(merge_bundle)
+            reduction_mode_vs_output_workspaces.update({ReductionMode.Merged: merge_bundle.merged_workspace})
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Deal with non-merged
+        # Note that we have non-merged workspaces even in the case of a merged reduction, i.e. LAB and HAB results
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Final clean up...")
+        output_workspaces_non_merged = get_final_output_workspaces(output_bundles, self)
+        reduction_mode_vs_output_workspaces.update(output_workspaces_non_merged)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Set the output workspaces
+        # --------------------------------------------------------------------------------------------------------------
+        # Set sample logs
+        # TODO: Set sample logs -> user file and unfitted transmission workspace. This should probably be set at
+        # a higher level (SANSBatch).
+        # Set the output workspaces
+        self.set_output_workspaces(reduction_mode_vs_output_workspaces)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Set the reduced can workspaces on the output if optimizations are
+        # enabled. This will allow SANSBatchReduction to add them to the ADS.
+        # --------------------------------------------------------------------------------------------------------------
+        if use_optimizations:
+            self.set_reduced_can_workspace_on_output(output_bundles, output_parts_bundles)
+
+    def validateInputs(self):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        try:
+            state = self._get_state()
+            state.validate()
+        except ValueError as err:
+            errors.update({"SANSSingleReduction": str(err)})
+        return errors
+
+    def _get_state(self):
+        state_property_manager = self.getProperty("SANSState").value
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+        state.property_manager = state_property_manager
+        return state
+
+    def _get_reduction_mode(self, state):
+        reduction_info = state.reduction
+        reduction_mode = reduction_info.reduction_mode
+        return reduction_mode
+
+    def _get_reduction_setting_bundles(self, state, reduction_mode):
+        # We need to output the parts if we request a merged reduction mode. This is necessary for stitching later on.
+        output_parts = reduction_mode is ReductionMode.Merged
+
+        # If the reduction mode is MERGED, then we need to make sure that all reductions for that selection
+        # are executed, i.e. we need to split it up
+        if reduction_mode is ReductionMode.Merged:
+            # If we are dealing with a merged reduction we need to know which detectors should be merged.
+            reduction_info = state.reduction
+            reduction_modes = reduction_info.get_merge_strategy()
+        elif reduction_mode is ReductionMode.All:
+            reduction_info = state.reduction
+            reduction_modes = reduction_info.get_all_reduction_modes()
+        else:
+            reduction_modes = [reduction_mode]
+
+        # Create the Scatter information
+        sample_info = self._create_reduction_bundles_for_data_type(state=state,
+                                                                   data_type=DataType.Sample,
+                                                                   reduction_modes=reduction_modes,
+                                                                   output_parts=output_parts,
+                                                                   scatter_name="SampleScatterWorkspace",
+                                                                   scatter_monitor_name="SampleScatterMonitorWorkspace",
+                                                                   transmission_name="SampleTransmissionWorkspace",
+                                                                   direct_name="SampleDirectWorkspace")
+
+        # Create the Can information
+        can_info = self._create_reduction_bundles_for_data_type(state=state,
+                                                                data_type=DataType.Can,
+                                                                reduction_modes=reduction_modes,
+                                                                output_parts=output_parts,
+                                                                scatter_name="CanScatterWorkspace",
+                                                                scatter_monitor_name="CanScatterMonitorWorkspace",
+                                                                transmission_name="CanTransmissionWorkspace",
+                                                                direct_name="CanDirectWorkspace")
+        reduction_setting_bundles = sample_info
+
+        # Make sure that the can information has at least a scatter and a monitor workspace
+        for can_bundle in can_info:
+            if can_bundle.scatter_workspace is not None and can_bundle.scatter_monitor_workspace is not None:
+                reduction_setting_bundles.append(can_bundle)
+        return reduction_setting_bundles
+
+    def _create_reduction_bundles_for_data_type(self, state, data_type, reduction_modes, output_parts,
+                                                scatter_name, scatter_monitor_name, transmission_name, direct_name):
+        # Get workspaces
+        scatter_workspace = self.getProperty(scatter_name).value
+
+        scatter_monitor_workspace = self.getProperty(scatter_monitor_name).value
+        transmission_workspace = self.getProperty(transmission_name).value
+        direct_workspace = self.getProperty(direct_name).value
+
+        # Iterate over all requested reduction modes, i.e. LAB, HAB, etc.
+        reduction_setting_bundles = []
+        for reduction_mode in reduction_modes:
+            reduction_setting_bundle = ReductionSettingBundle(state=state,
+                                                              data_type=data_type,
+                                                              reduction_mode=reduction_mode,
+                                                              output_parts=output_parts,
+                                                              scatter_workspace=scatter_workspace,
+                                                              scatter_monitor_workspace=scatter_monitor_workspace,
+                                                              transmission_workspace=transmission_workspace,
+                                                              direct_workspace=direct_workspace)
+            reduction_setting_bundles.append(reduction_setting_bundle)
+        return reduction_setting_bundles
+
+    def set_shift_and_scale_output(self, merge_bundle):
+        self.setProperty("OutScaleFactor", merge_bundle.scale)
+        self.setProperty("OutShiftFactor", merge_bundle.shift)
+
+    def set_output_workspaces(self, reduction_mode_vs_output_workspaces):
+        """
+        Sets the output workspaces which can be HAB, LAB or Merged.
+
+        At this step we also provide a workspace name to the sample logs, which can be used later on for saving.
+        :param reduction_mode_vs_output_workspaces:  map from reduction mode to output workspace
+        """
+        # Note that this breaks the flexibility we have established for the reduction mode: we have not hard-coded
+        # HAB or LAB anywhere else, so other detectors could become relevant in the future. Here we reference HAB and
+        # LAB directly because we currently don't want to rely on dynamic properties. See also PyInit.
+        for reduction_mode, output_workspace in list(reduction_mode_vs_output_workspaces.items()):
+            if reduction_mode is ReductionMode.Merged:
+                self.setProperty("OutputWorkspaceMerged", output_workspace)
+            elif reduction_mode is ISISReductionMode.LAB:
+                self.setProperty("OutputWorkspaceLAB", output_workspace)
+            elif reduction_mode is ISISReductionMode.HAB:
+                self.setProperty("OutputWorkspaceHAB", output_workspace)
+            else:
+                raise RuntimeError("SANSSingleReduction: Cannot set the output workspace. The selected reduction "
+                                   "mode {0} is unknown.".format(reduction_mode))
+
+    def set_reduced_can_workspace_on_output(self, output_bundles, output_bundles_part):
+        """
+        Sets the reduced can workspaces on the output properties.
+
+        The reduced can workspaces can be:
+        1. LAB Can
+        2. LAB Can Count
+        3. LAB Can Norm
+        4. HAB Can
+        5. HAB Can Count
+        6. HAB Can Norm
+        :param output_bundles: a list of output bundles
+        :param output_bundles_part: a list of partial output bundles
+        """
+        # Find the LAB Can and HAB Can entries if they exist
+        for output_bundle in output_bundles:
+            if output_bundle.data_type is DataType.Can:
+                reduction_mode = output_bundle.reduction_mode
+                output_workspace = output_bundle.output_workspace
+                # Make sure that the output workspace is not None, which can happen if no can was ever set
+                # for the reduction.
+                if output_workspace is not None and not does_can_workspace_exist_on_ads(output_workspace):
+                    if reduction_mode is ISISReductionMode.LAB:
+                        self.setProperty("OutputWorkspaceLABCan", output_workspace)
+                    elif reduction_mode is ISISReductionMode.HAB:
+                        self.setProperty("OutputWorkspaceHABCan", output_bundle.output_workspace)
+                    else:
+                        raise RuntimeError("SANSSingleReduction: The reduction mode {0} should not"
+                                           " be set with a can.".format(reduction_mode))
+
+        # Find the partial output bundles for LAB Can and HAB Can if they exist
+        for output_bundle_part in output_bundles_part:
+            if output_bundle_part.data_type is DataType.Can:
+                reduction_mode = output_bundle_part.reduction_mode
+                output_workspace_count = output_bundle_part.output_workspace_count
+                output_workspace_norm = output_bundle_part.output_workspace_norm
+                # Make sure that the output workspace is not None, which can happen if no can was ever set
+                # for the reduction.
+                if output_workspace_norm is not None and output_workspace_count is not None and \
+                        not does_can_workspace_exist_on_ads(output_workspace_norm) and \
+                        not does_can_workspace_exist_on_ads(output_workspace_count):
+                    if reduction_mode is ISISReductionMode.LAB:
+                        self.setProperty("OutputWorkspaceLABCanCount", output_workspace_count)
+                        self.setProperty("OutputWorkspaceLABCanNorm", output_workspace_norm)
+                    elif reduction_mode is ISISReductionMode.HAB:
+                        self.setProperty("OutputWorkspaceHABCanCount", output_workspace_count)
+                        self.setProperty("OutputWorkspaceHABCanNorm", output_workspace_norm)
+                    else:
+                        raise RuntimeError("SANSSingleReduction: The reduction mode {0} should not"
+                                           " be set with a partial can.".format(reduction_mode))
+
+    def _get_progress(self, number_of_reductions, overall_reduction_mode):
+        number_from_merge = 1 if overall_reduction_mode is ReductionMode.Merged else 0
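+        # One extra progress report is reserved for the final clean-up step reported in exec.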
+        number_of_progress_reports = number_of_reductions + number_from_merge + 1
+        return Progress(self, start=0.0, end=1.0, nreports=number_of_progress_reports)
+
+
+# Register algorithm with Mantid
+AlgorithmFactory.subscribe(SANSSingleReduction)
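The if/elif dispatch in set_output_workspaces above could also be expressed as a lookup table keyed on the reduction mode. The sketch below is not part of the patch; it assumes ReductionMode and ISISReductionMode are importable from sans.common.enums, like the other enums this code relies on.

```python
from sans.common.enums import ReductionMode, ISISReductionMode

# Hypothetical table-driven variant of set_output_workspaces; the property names are copied from the code above.
OUTPUT_PROPERTY_FOR_MODE = {
    ReductionMode.Merged: "OutputWorkspaceMerged",
    ISISReductionMode.LAB: "OutputWorkspaceLAB",
    ISISReductionMode.HAB: "OutputWorkspaceHAB",
}


def set_output_workspaces(self, reduction_mode_vs_output_workspaces):
    for reduction_mode, output_workspace in reduction_mode_vs_output_workspaces.items():
        if reduction_mode not in OUTPUT_PROPERTY_FOR_MODE:
            raise RuntimeError("SANSSingleReduction: Cannot set the output workspace. The selected reduction "
                               "mode {0} is unknown.".format(reduction_mode))
        self.setProperty(OUTPUT_PROPERTY_FOR_MODE[reduction_mode], output_workspace)
```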
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSStitch.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSStitch.py
index 4cc5ad0f1f3068c3d263b011525f99b84dd20a17..ce1208b881bd81272b54916db3c7e11f9afcb3c1 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSStitch.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSStitch.py
@@ -161,7 +161,8 @@ class SANSStitch(DataProcessorAlgorithm):
         # We want: (Cf+shift*Nf+Cr)/(Nf/scale + Nr)
         shifted_norm_front = self._scale(nF, shift_factor)
         scaled_norm_front = self._scale(nF, 1.0 / scale_factor)
-        numerator = self._add(self._add(cF, shifted_norm_front), cR)
+        add_counts_and_shift = self._add(cF, shifted_norm_front)
+        numerator = self._add(add_counts_and_shift, cR)
         denominator = self._add(scaled_norm_front, nR)
         merged_q = self._divide(numerator, denominator)
         return merged_q
@@ -199,7 +200,10 @@ class SANSStitch(DataProcessorAlgorithm):
         x_vals = ws.readX(0)
         start_x = x_vals[start]
         # Make sure we're inside the bin that we want to crop
-        end_x = x_vals[stop + 1]
+        if len(y_vals) == len(x_vals):
+            end_x = x_vals[stop]
+        else:
+            end_x = x_vals[stop + 1]
         return self._crop_to_x_range(ws=ws,x_min=start_x, x_max=end_x)
 
     def _run_fit(self, q_high_angle, q_low_angle, scale_factor, shift_factor):
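For reference, the merged-Q expression quoted in the comment above, (Cf + shift*Nf + Cr)/(Nf/scale + Nr), can be sanity-checked with a few lines of numpy. This is a minimal sketch; the array names are hypothetical stand-ins for the count (cF, cR) and normalisation (nF, nR) workspaces used by SANSStitch.

```python
import numpy as np


def merge_q(counts_front, norm_front, counts_rear, norm_rear, shift_factor, scale_factor):
    """Element-wise (Cf + shift*Nf + Cr) / (Nf/scale + Nr), mirroring the workspace arithmetic above."""
    numerator = counts_front + shift_factor * norm_front + counts_rear
    denominator = norm_front / scale_factor + norm_rear
    return numerator / denominator


merged = merge_q(np.array([1.0, 2.0]), np.array([0.5, 0.5]),
                 np.array([1.5, 1.0]), np.array([1.0, 1.0]),
                 shift_factor=0.1, scale_factor=2.0)
```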
diff --git a/Framework/PythonInterface/plugins/algorithms/dnsdata.py b/Framework/PythonInterface/plugins/algorithms/dnsdata.py
index 667e20f8dad35ad44a79cf461bb5d73d0712cf68..c94073886c40873816d95c1fb47c9985f266993c 100644
--- a/Framework/PythonInterface/plugins/algorithms/dnsdata.py
+++ b/Framework/PythonInterface/plugins/algorithms/dnsdata.py
@@ -1,7 +1,7 @@
 # pylint: disable=too-many-instance-attributes,too-few-public-methods
 from __future__ import (absolute_import, division, print_function)
 import re
-import datetime
+from dateutil.parser import parse
 
 
 class DNSdata(object):
@@ -182,11 +182,14 @@ class DNSdata(object):
             if self.tof_channel_number > 1:
                 self.tof_channel_width = float(b6splitted[3].split()[3])
                 self.tof_delay_time = float(b6splitted[4].split()[2])
-                self.tof_elastic_channel = int(b6splitted[6].split()[3])
+                if len(b6splitted[6].split()) > 3:
+                    self.tof_elastic_channel = int(b6splitted[6].split()[3])
                 # chopper rotation speed
-                self.chopper_rotation_speed = float(b6splitted[7].split()[2])
+                if len(b6splitted[7].split()) > 2:
+                    self.chopper_rotation_speed = float(b6splitted[7].split()[2])
                 # chopper number of slits
-                self.chopper_slits = int(b6splitted[5].split()[2])
+                if len(b6splitted[5].split()) > 2:
+                    self.chopper_slits = int(b6splitted[5].split()[2])
 
             # parse block 7 (Time and monitor)
             # assume everything to be at the fixed positions
@@ -203,11 +206,9 @@ class DNSdata(object):
             self.monitor_counts = int(line[1])
             # start_time and end_time (if specified)
             outfmt = "%Y-%m-%dT%H:%M:%S"
-            sinfmt = "start   at %a %b  %d %H:%M:%S %Y"
-            einfmt = "stopped at %a %b  %d %H:%M:%S %Y"
             try:
-                self.start_time = datetime.datetime.strptime(b7splitted[5], sinfmt).strftime(outfmt)
-                self.end_time = datetime.datetime.strptime(b7splitted[6], einfmt).strftime(outfmt)
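+                # The first 10 characters hold the "start   at"/"stopped at" prefix; dateutil parses the rest.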
+                self.start_time = parse(b7splitted[5][10:].strip()).strftime(outfmt)
+                self.end_time = parse(b7splitted[6][10:].strip()).strftime(outfmt)
             except ValueError:
                 # if start and end time are not given, let them empty
                 pass
diff --git a/Framework/PythonInterface/test/python/mantid/api/IFunction1DTest.py b/Framework/PythonInterface/test/python/mantid/api/IFunction1DTest.py
index 4a3fed4636a223f92a55e95a4721aa3392bee9da..ca268465222b8e65528a80082f0ff1f5d9b83412 100644
--- a/Framework/PythonInterface/test/python/mantid/api/IFunction1DTest.py
+++ b/Framework/PythonInterface/test/python/mantid/api/IFunction1DTest.py
@@ -19,6 +19,7 @@ class Times2(IFunction1D):
         self.declareAttribute("DoubleAtt", 3.4)
         self.declareAttribute("StringAtt", "filename")
         self.declareAttribute("BoolAtt", True)
+        self.declareAttribute("ListAtt", [1, 2, 3])
 
         self.declareParameter("ParamZeroInitNoDescr")
         self.declareParameter("ParamNoDescr", 1.5)
@@ -55,8 +56,8 @@ class IFunction1DTest(unittest.TestCase):
     def test_declareAttribute_only_accepts_known_types(self):
         func = Times2()
         func.initialize() # Contains known types
-        self.assertEquals(4, func.nAttributes()) # Make sure initialize ran
-        self.assertRaises(ValueError, func.declareAttribute, "ListAtt", [1,2,3])
+        self.assertEquals(5, func.nAttributes()) # Make sure initialize ran
+        self.assertRaises(ValueError, func.declareAttribute, "DictAtt", {1,2,3})
 
     def test_correct_attribute_values_are_returned_when_asked(self):
         func = Times2()
diff --git a/Framework/PythonInterface/test/python/mantid/kernel/UnitFactoryTest.py b/Framework/PythonInterface/test/python/mantid/kernel/UnitFactoryTest.py
index 53d9b21d8bbc4ca8eecc4bc58183d1af0dfb9d87..2ee8306b50c7fec572c5eb2e49e299c6fbcf32ef 100644
--- a/Framework/PythonInterface/test/python/mantid/kernel/UnitFactoryTest.py
+++ b/Framework/PythonInterface/test/python/mantid/kernel/UnitFactoryTest.py
@@ -25,7 +25,7 @@ class UnitFactoryTest(unittest.TestCase):
         core_units = ['Empty', 'Label', 'TOF', 'Wavelength','Energy',
                       'Energy_inWavenumber', 'dSpacing', 'MomentumTransfer',
                       'QSquared', 'DeltaE', 'DeltaE_inWavenumber',
-                      'DeltaE_inFrequency', 'Momentum']
+                      'DeltaE_inFrequency', 'Momentum', 'dSpacingPerpendicular']
         self.assertTrue(len(core_units) <= len(known_units))
 
         for unit in core_units:
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py
index 14eef9a18081873906761dbcc2e0b700cbdc2708..8fe71f71d0b0d2add3b42dff6629dab24dd6e729 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py
@@ -24,7 +24,7 @@ class FindEPPTest(unittest.TestCase):
     def testTable(self):
         # tests that correct table is created
         OutputWorkspaceName = "outputws1"
-        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName, Version=1)
         self.assertTrue(alg_test.isExecuted())
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         self.assertEqual(2, wsoutput.rowCount())
@@ -38,7 +38,7 @@ class FindEPPTest(unittest.TestCase):
         ws2 = CloneWorkspace(self._input_ws)
         group = GroupWorkspaces([self._input_ws, ws2])
         OutputWorkspaceName = "output_wsgroup"
-        alg_test = run_algorithm("FindEPP", InputWorkspace='group', OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace='group', OutputWorkspace=OutputWorkspaceName, Version=1)
         self.assertTrue(alg_test.isExecuted())
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         self.assertTrue(isinstance(wsoutput, WorkspaceGroup))
@@ -50,7 +50,7 @@ class FindEPPTest(unittest.TestCase):
     def testFitSuccess(self):
         # tests successful fit
         OutputWorkspaceName = "outputws2"
-        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName, Version=1)
         self.assertTrue(alg_test.isExecuted())
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         self.assertEqual(['success', 'success'], wsoutput.column(8))
@@ -71,7 +71,7 @@ class FindEPPTest(unittest.TestCase):
                                           NumBanks=2, BankPixelWidth=1, XMin=0, XMax=10, BinWidth=0.1)
 
         OutputWorkspaceName = "outputws3"
-        alg_test = run_algorithm("FindEPP", InputWorkspace=ws_linear, OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace=ws_linear, OutputWorkspace=OutputWorkspaceName, Version=1)
         self.assertTrue(alg_test.isExecuted())
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         self.assertEqual(['failed', 'failed'], wsoutput.column(8))
@@ -89,7 +89,7 @@ class FindEPPTest(unittest.TestCase):
                                           NumBanks=2, BankPixelWidth=1, XMin=0, XMax=10, BinWidth=0.1)
 
         OutputWorkspaceName = "outputws4"
-        alg_test = run_algorithm("FindEPP", InputWorkspace=ws_narrow, OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace=ws_narrow, OutputWorkspace=OutputWorkspaceName, Version=1)
         self.assertTrue(alg_test.isExecuted())
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         self.assertEqual(['failed', 'failed'], wsoutput.column(8))
@@ -103,7 +103,7 @@ class FindEPPTest(unittest.TestCase):
 
     def testFitOutputWorkspacesAreDeleted(self):
         OutputWorkspaceName = "outputws1"
-        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName, Version=1)
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         DeleteWorkspace(wsoutput)
         oldOption = mantid.config['MantidOptions.InvisibleWorkspaces']
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/LoadCIFTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/LoadCIFTest.py
index b8e136fddb23b998332ad6b7e9ecae40b45ded2d..0fdbab965bf4b2a2e1ccc12fe25085ec5d5494df 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/LoadCIFTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/LoadCIFTest.py
@@ -61,6 +61,10 @@ class SpaceGroupBuilderTest(unittest.TestCase):
 
         self.assertEqual(fn('P m -3 m :1'), 'P m -3 m')
         self.assertEqual(fn('P m -3 m :H'), 'P m -3 m')
+        self.assertEqual(fn('F d -3 m S'), 'F d -3 m')
+        self.assertEqual(fn('F d -3 m Z'), 'F d -3 m :2')
+        self.assertEqual(fn('R 3 H'), 'R 3')
+        self.assertEqual(fn('R 3 R'), 'R 3 :r')
 
     def test_getSpaceGroupFromNumber_invalid(self):
         invalid_old = {u'_symmetry_int_tables_number': u'400'}
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/LoadDNSLegacyTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/LoadDNSLegacyTest.py
index 059b81760cd61e57d5fae6d26fccccb8525afdf5..e82f2ead43cc8554d74cb51eed0edc608d5f1a66 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/LoadDNSLegacyTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/LoadDNSLegacyTest.py
@@ -36,7 +36,7 @@ class LoadDNSLegacyTest(unittest.TestCase):
         self.assertEqual(-8.54, run.getProperty('deterota').value)
         self.assertEqual(8332872, run.getProperty('mon_sum').value)
         self.assertEqual('z', run.getProperty('polarisation').value)
-        self.assertEqual('7', run.getProperty('polarisation_comment').value)
+        self.assertEqual('7', str(run.getProperty('polarisation_comment').value))
         self.assertEqual('no', run.getProperty('normalized').value)
         # check whether detector bank is rotated
         det = ws.getDetector(0)
@@ -128,5 +128,64 @@ class LoadDNSLegacyTest(unittest.TestCase):
         run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
         return
 
+    def test_LoadNoCurtable(self):
+        outputWorkspaceName = "LoadDNSLegacyTest_Test6"
+        filename = "dn134011vana.d_dat"
+        alg_test = run_algorithm("LoadDNSLegacy", Filename=filename, Normalization='no',
+                                 OutputWorkspace=outputWorkspaceName)
+        self.assertTrue(alg_test.isExecuted())
+
+        # Verify some values
+        ws = AnalysisDataService.retrieve(outputWorkspaceName)
+        # dimensions
+        self.assertEqual(24, ws.getNumberHistograms())
+        self.assertEqual(2,  ws.getNumDims())
+        # data array
+        self.assertEqual(31461, ws.readY(1))
+        self.assertEqual(13340, ws.readY(23))
+        # sample logs
+        run = ws.getRun()
+        self.assertEqual(-8.54, run.getProperty('deterota').value)
+        self.assertEqual(8332872, run.getProperty('mon_sum').value)
+        self.assertEqual('z', run.getProperty('polarisation').value)
+        self.assertEqual('7', str(run.getProperty('polarisation_comment').value))
+        self.assertEqual('no', run.getProperty('normalized').value)
+        # check whether detector bank is rotated
+        det = ws.getDetector(0)
+        self.assertAlmostEqual(8.54, ws.detectorSignedTwoTheta(det)*180/pi)
+        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
+        return
+
+    def test_LoadTOF(self):
+        outputWorkspaceName = "LoadDNSLegacyTest_Test7"
+        filename = "dnstof.d_dat"
+        tof1 = 385.651     # must be changed if L1 changes
+        alg_test = run_algorithm("LoadDNSLegacy", Filename=filename, Normalization='no',
+                                 OutputWorkspace=outputWorkspaceName)
+        self.assertTrue(alg_test.isExecuted())
+
+        # Verify some values
+        ws = AnalysisDataService.retrieve(outputWorkspaceName)
+        # dimensions
+        self.assertEqual(24, ws.getNumberHistograms())
+        self.assertEqual(100,  ws.getNumberBins())
+        # data array
+        self.assertEqual(1, ws.readY(19)[5])
+        self.assertAlmostEqual(tof1, ws.readX(0)[0], 3)
+        self.assertAlmostEqual(tof1+802.0*100, ws.readX(0)[100], 3)
+        # sample logs
+        run = ws.getRun()
+        self.assertEqual(-7.5, run.getProperty('deterota').value)
+        self.assertEqual(100, run.getProperty('tof_channels').value)
+        self.assertEqual(51428, run.getProperty('mon_sum').value)
+        self.assertEqual('z', run.getProperty('polarisation').value)
+        self.assertEqual('7', str(run.getProperty('polarisation_comment').value))
+        self.assertEqual('no', run.getProperty('normalized').value)
+        # check whether detector bank is rotated
+        det = ws.getDetector(0)
+        self.assertAlmostEqual(7.5, ws.detectorSignedTwoTheta(det)*180/pi)
+        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
+        return
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransferTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransferTest.py
index e666e353815b51c272cb49c7589b89bc8acd97a3..518934434cb49f12b323f740ded20fcee60bac9c 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransferTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransferTest.py
@@ -81,6 +81,11 @@ class IndirectILLEnergyTransferTest(unittest.TestCase):
         res = IndirectILLEnergyTransfer(**args)
         self._check_workspace_group(res, 2, 18, 512)
 
+    def test_spectrum_axis(self):
+        args = {'Run': self._runs['one_wing_EFWS'], 'SpectrumAxis': '2Theta'}
+        res = IndirectILLEnergyTransfer(**args)
+        self.assertEqual(res.getItem(0).getAxis(1).getUnit().unitID(), "Theta")
+
     def _check_workspace_group(self, wsgroup, nentries, nspectra, nbins):
 
         self.assertTrue(isinstance(wsgroup, WorkspaceGroup),
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/CMakeLists.txt b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/CMakeLists.txt
index 384525860e635b3a126feefa1984f82be2e19e3e..bdb3723c7e73203104083265e87e3b225980d7f3 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/CMakeLists.txt
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/CMakeLists.txt
@@ -3,8 +3,12 @@
 ######################
 
 set ( TEST_PY_FILES
+SANSCalculateTransmissionTest.py
 SANSConvertToQTest.py
 SANSConvertToWavelengthTest.py
+SANSCreateAdjustmentWorkspacesTest.py
+SANSCreateWavelengthAndPixelAdjustmentTest.py
+SANSNormalizeToMonitorTest.py
 SANSScaleTest.py
 SANSSliceEventTest.py
 )
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCalculateTransmissionTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCalculateTransmissionTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..86eae1326135e4e8d82191f37df8ba45a207e5fb
--- /dev/null
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCalculateTransmissionTest.py
@@ -0,0 +1,365 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+import numpy as np
+from sans.test_helper.test_director import TestDirector
+from sans.state.calculate_transmission import get_calculate_transmission_builder
+from sans.common.enums import (RebinType, RangeStepType, FitType)
+from sans.common.general_functions import (create_unmanaged_algorithm)
+from sans.common.constants import EMPTY_NAME
+
+
+def get_expected_for_spectrum_n(data_workspace, selected_workspace_index, value_array):
+    # Expected output.
+    # 1. The background correction should produce [0., value., value., value.]
+    # 2. Conversion to wavelength changes the x axis approximately to
+    #                                          [2.7, 5.5, 8.2, 10.9, 13.7] for spectrum 1
+    #                                          [1.0, 2.0, 3.0, 4.1, 5.1]   for spectrum 3
+    # 3. Rebinning creates the x steps [2, 4, 6, 8]
+    # This means for spectrum 1:
+    #   Bin0:  0
+    #   Bin1: (0 + abs(5.5-6.0)/abs(5.5 - 8.2))*value
+    #   Bin2: (abs(6.0-8.0)/abs(5.5 - 8.2))*value
+    # This means for spectrum 3:
+    #   Bin0: (1. + abs(3.0-4.0)/abs(3.0 - 4.1))*value
+    #   Bin1: (abs(4.0-4.1)/abs(3.0 - 4.1) + 1.)*value
+    #   Bin2: 0
+
+    # The second bin should have abs(4.0-4.7)/abs(3.2 - 4.7)*value + abs(4.7-6.0)/abs(4.7 - 6.3)*value
+    # The third bin should have abs(6.0-6.3)/abs(4.7 - 6.3)*value + value
+    instrument = data_workspace.getInstrument()
+    source = instrument.getSource()
+    detector = data_workspace.getDetector(selected_workspace_index)
+    distance_source_detector = detector.getDistance(source)
+    h = 6.62606896e-34
+    mass = 1.674927211e-27
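+    # h is the Planck constant [J s] and mass is the neutron mass [kg]; the list comprehension below applies the
+    # de Broglie relation lambda = h * t / (m * L) to convert TOF (micro-seconds) into wavelength (Angstrom).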
+    times = data_workspace.dataX(0)
+    lambda_after_unit_conversion = [(time * 1e-6) * h / distance_source_detector / mass * 1e10 for time in times]
+    expected_lambda = [2., 4., 6., 8.]
+    if selected_workspace_index == 0:
+        expected_signal = [0.,
+                           abs(lambda_after_unit_conversion[1] - expected_lambda[2]) /
+                           abs(lambda_after_unit_conversion[1] - lambda_after_unit_conversion[2]) * value_array[1],
+                           (abs(expected_lambda[3] - expected_lambda[2]) /
+                            abs(lambda_after_unit_conversion[1] - lambda_after_unit_conversion[2])) * value_array[1]]
+    else:
+        expected_signal = [1. * value_array[1] + abs(lambda_after_unit_conversion[2] - expected_lambda[1]) /
+                           abs(lambda_after_unit_conversion[2] - lambda_after_unit_conversion[3]) * value_array[2],
+                           abs(lambda_after_unit_conversion[3] - expected_lambda[1]) /
+                           abs(lambda_after_unit_conversion[2] - lambda_after_unit_conversion[3]) * value_array[2] +
+                           1. * value_array[3],
+                           0.]
+
+    return np.array(expected_lambda), np.array(expected_signal)
+
+
+class SANSCalculateTransmissionTest(unittest.TestCase):
+    sample_workspace = None
+    sample_workspace_2 = None
+
+    @staticmethod
+    def _load_workspace(file_name):
+        load_name = "Load"
+        load_options = {"OutputWorkspace": EMPTY_NAME,
+                        "Filename": file_name}
+        load_alg = create_unmanaged_algorithm(load_name, **load_options)
+        load_alg.execute()
+        return load_alg.getProperty("OutputWorkspace").value
+
+    @staticmethod
+    def _clone_workspace(workspace):
+        clone_name = "CloneWorkspace"
+        clone_options = {"InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME}
+        clone_alg = create_unmanaged_algorithm(clone_name, **clone_options)
+        clone_alg.execute()
+        return clone_alg.getProperty("OutputWorkspace").value
+
+    @staticmethod
+    def _get_transmission_workspace(data=None):
+        # Load the workspace
+        if SANSCalculateTransmissionTest.sample_workspace is None:
+            SANSCalculateTransmissionTest.sample_workspace = \
+                SANSCalculateTransmissionTest._load_workspace("SANS2D00022024")
+        # Clone the workspace
+        workspace = SANSCalculateTransmissionTest._clone_workspace(SANSCalculateTransmissionTest.sample_workspace)
+        # Prepare the workspace
+        return SANSCalculateTransmissionTest._prepare_workspace(workspace, data=data)
+
+    @staticmethod
+    def _get_roi_workspace():
+        # Load the workspace
+        if SANSCalculateTransmissionTest.sample_workspace_2 is None:
+            SANSCalculateTransmissionTest.sample_workspace_2 = SANSCalculateTransmissionTest._load_workspace("LOQ74044")
+        # Clone the workspace
+        return SANSCalculateTransmissionTest._clone_workspace(SANSCalculateTransmissionTest.sample_workspace_2)
+
+    @staticmethod
+    def _get_state(transmission_radius_on_detector=None, transmission_roi_files=None, transmission_mask_files=None,
+                   transmission_monitor=None, incident_monitor=None, rebin_type=None, wavelength_low=None,
+                   wavelength_high=None, wavelength_step=None, wavelength_step_type=None,
+                   use_full_wavelength_range=None,
+                   wavelength_full_range_low=None, wavelength_full_range_high=None, prompt_peak_correction_min=None,
+                   prompt_peak_correction_max=None, background_TOF_general_start=None, background_TOF_general_stop=None,
+                   background_TOF_monitor_start=None, background_TOF_monitor_stop=None, background_TOF_roi_start=None,
+                   background_TOF_roi_stop=None, sample_fit_type=None, sample_polynomial_order=None,
+                   sample_wavelength_low=None, sample_wavelength_high=None, can_fit_type=None,
+                   can_polynomial_order=None,
+                   can_wavelength_low=None, can_wavelength_high=None):
+        test_director = TestDirector()
+        state = test_director.construct()
+        data_state = state.data
+        calculate_transmission_builder = get_calculate_transmission_builder(data_state)
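+        # Only the settings that are explicitly supplied are pushed into the builder; any argument left as None
+        # keeps the default provided by the TestDirector state.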
+        if transmission_radius_on_detector:
+            calculate_transmission_builder.set_transmission_radius_on_detector(transmission_radius_on_detector)
+        if transmission_roi_files:
+            calculate_transmission_builder.set_transmission_roi_files(transmission_roi_files)
+        if transmission_mask_files:
+            calculate_transmission_builder.set_transmission_mask_files(transmission_mask_files)
+        if transmission_monitor:
+            calculate_transmission_builder.set_transmission_monitor(transmission_monitor)
+
+        if incident_monitor:
+            calculate_transmission_builder.set_incident_monitor(incident_monitor)
+        if rebin_type:
+            calculate_transmission_builder.set_rebin_type(rebin_type)
+        if wavelength_low:
+            calculate_transmission_builder.set_wavelength_low(wavelength_low)
+        if wavelength_high:
+            calculate_transmission_builder.set_wavelength_high(wavelength_high)
+        if wavelength_step:
+            calculate_transmission_builder.set_wavelength_step(wavelength_step)
+        if wavelength_step_type:
+            calculate_transmission_builder.set_wavelength_step_type(wavelength_step_type)
+        if use_full_wavelength_range:
+            calculate_transmission_builder.set_use_full_wavelength_range(use_full_wavelength_range)
+        if wavelength_full_range_low:
+            calculate_transmission_builder.set_wavelength_full_range_low(wavelength_full_range_low)
+        if wavelength_full_range_high:
+            calculate_transmission_builder.set_wavelength_full_range_high(wavelength_full_range_high)
+
+        if prompt_peak_correction_min:
+            calculate_transmission_builder.set_prompt_peak_correction_min(prompt_peak_correction_min)
+        if prompt_peak_correction_max:
+            calculate_transmission_builder.set_prompt_peak_correction_max(prompt_peak_correction_max)
+        if prompt_peak_correction_min and prompt_peak_correction_max:
+            calculate_transmission_builder.set_prompt_peak_correction_enabled(True)
+
+        if background_TOF_general_start:
+            calculate_transmission_builder.set_background_TOF_general_start(background_TOF_general_start)
+        if background_TOF_general_stop:
+            calculate_transmission_builder.set_background_TOF_general_stop(background_TOF_general_stop)
+        if background_TOF_monitor_start:
+            calculate_transmission_builder.set_background_TOF_monitor_start(background_TOF_monitor_start)
+        if background_TOF_monitor_stop:
+            calculate_transmission_builder.set_background_TOF_monitor_stop(background_TOF_monitor_stop)
+        if background_TOF_roi_start:
+            calculate_transmission_builder.set_background_TOF_roi_start(background_TOF_roi_start)
+        if background_TOF_roi_stop:
+            calculate_transmission_builder.set_background_TOF_roi_stop(background_TOF_roi_stop)
+
+        if sample_fit_type:
+            calculate_transmission_builder.set_Sample_fit_type(sample_fit_type)
+        if sample_polynomial_order:
+            calculate_transmission_builder.set_Sample_polynomial_order(sample_polynomial_order)
+        if sample_wavelength_low:
+            calculate_transmission_builder.set_Sample_wavelength_low(sample_wavelength_low)
+        if sample_wavelength_high:
+            calculate_transmission_builder.set_Sample_wavelength_high(sample_wavelength_high)
+
+        if can_fit_type:
+            calculate_transmission_builder.set_Can_fit_type(can_fit_type)
+        if can_polynomial_order:
+            calculate_transmission_builder.set_Can_polynomial_order(can_polynomial_order)
+        if can_wavelength_low:
+            calculate_transmission_builder.set_Can_wavelength_low(can_wavelength_low)
+        if can_wavelength_high:
+            calculate_transmission_builder.set_Can_wavelength_high(can_wavelength_high)
+        calculate_transmission = calculate_transmission_builder.build()
+        state.adjustment.calculate_transmission = calculate_transmission
+        return state.property_manager
+
+    @staticmethod
+    def _prepare_workspace(workspace, data=None):
+        """
+        Creates a test monitor workspace with 4 bins
+        """
+        # Rebin the workspace
+        rebin_name = "Rebin"
+        rebin_options = {"InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME,
+                         "Params": "5000,5000,25000"}
+        rebin_alg = create_unmanaged_algorithm(rebin_name, **rebin_options)
+        rebin_alg.execute()
+        rebinned = rebin_alg.getProperty("OutputWorkspace").value
+
+        # Now set specified monitors to specified values
+        if data is not None:
+            for key, value in list(data.items()):
+                data_y = rebinned.dataY(key)
+                for index in range(len(data_y)):
+                    data_y[index] = value[index]
+        return rebinned
+
+    @staticmethod
+    def _run_test(transmission_workspace, direct_workspace, state, is_sample=True):
+        calculate_transmission_name = "SANSCalculateTransmission"
+        calculate_transmission_options = {"TransmissionWorkspace": transmission_workspace,
+                                          "DirectWorkspace": direct_workspace,
+                                          "SANSState": state,
+                                          "OutputWorkspace": EMPTY_NAME,
+                                          "UnfittedData": "unfitted"}
+        if is_sample:
+            calculate_transmission_options.update({"DataType": "Sample"})
+        else:
+            calculate_transmission_options.update({"DataType": "Can"})
+
+        calculate_transmission_alg = create_unmanaged_algorithm(calculate_transmission_name,
+                                                                **calculate_transmission_options)
+        calculate_transmission_alg.execute()
+        workspace = calculate_transmission_alg.getProperty("OutputWorkspace").value
+        unfitted = calculate_transmission_alg.getProperty("UnfittedData").value
+        return workspace, unfitted
+
+    def _do_assert(self, transmission_workspace, direct_workspace, unfitted_workspace, fitted_workspace,
+                   trans_incident, trans_trans, direct_incident, direct_trans):
+        # Perform background correction, conversion to wavelength and rebinning
+        trans_incident_background_corrected = np.array(trans_incident) - trans_incident[0]
+        trans_trans_background_corrected = np.array(trans_trans) - trans_trans[0]
+        direct_incident_background_corrected = np.array(direct_incident) - direct_incident[0]
+        direct_trans_background_corrected = np.array(direct_trans) - direct_trans[0]
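+        # The first entry of each list is the background bin (see the test data), so subtracting element 0
+        # emulates the flat background correction performed by the algorithm.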
+
+        trans_incident_lambda, trans_incident_signal = get_expected_for_spectrum_n(transmission_workspace, 0,
+                                                                                   trans_incident_background_corrected)
+        trans_lambda, trans_signal = get_expected_for_spectrum_n(transmission_workspace, 2,
+                                                                 trans_trans_background_corrected)
+        direct_incident_lambda, direct_incident_signal = get_expected_for_spectrum_n(direct_workspace, 0,
+                                                                                     direct_incident_background_corrected)
+        direct_lambda, direct_signal = get_expected_for_spectrum_n(direct_workspace, 2,
+                                                                   direct_trans_background_corrected)
+
+        # Perform step transmission calculation
+        with np.errstate(divide='ignore'):
+            ratio = (trans_signal / trans_incident_signal) / (direct_signal / direct_incident_signal)
+            ratio = np.nan_to_num(ratio)
+
+        # Assert
+        tolerance = 1e-8
+        self.assertTrue(fitted_workspace.getNumberHistograms() == 1)
+        self.assertTrue(unfitted_workspace.getNumberHistograms() == 1)
+        self.assertTrue(fitted_workspace.YUnitLabel() == "Transmission")
+        self.assertTrue(unfitted_workspace.YUnitLabel() == "Transmission")
+        self.assertTrue(unfitted_workspace.isDistribution())
+        self.assertTrue(fitted_workspace.isDistribution())
+
+        for e1, e2 in zip(fitted_workspace.dataX(0), trans_lambda):
+            self.assertTrue(abs(e1 - e2) < tolerance)
+
+        for e1, e2 in zip(unfitted_workspace.dataY(0), ratio):
+            self.assertTrue(abs(e1 - e2) < tolerance)
+            self.assertTrue(e1 <= 1.0)  # The transmission has to be smaller or equal to 1
+
+    def test_that_calculates_transmission_for_general_background_and_no_prompt_peak(self):
+        # Arrange
+        state = SANSCalculateTransmissionTest._get_state(rebin_type=RebinType.Rebin, wavelength_low=2.,
+                                                         wavelength_high=8., wavelength_step=2.,
+                                                         wavelength_step_type=RangeStepType.Lin,
+                                                         background_TOF_general_start=5000.,
+                                                         background_TOF_general_stop=10000., incident_monitor=1,
+                                                         transmission_monitor=3, sample_fit_type=FitType.Linear,
+                                                         sample_polynomial_order=0, sample_wavelength_low=2.,
+                                                         sample_wavelength_high=8.)
+        # Get a test monitor workspace with 4 bins where the first bin is the background
+        trans_incident = [20., 220., 210., 230.]
+        trans_trans = [10., 70., 50., 80.]
+        direct_incident = [40., 401., 430., 420.]
+        direct_trans = [30., 320., 350., 335.]
+        data_transmission = {0: trans_incident, 2: trans_trans}
+        transmission_workspace = SANSCalculateTransmissionTest._get_transmission_workspace(data=data_transmission)
+        data_direct = {0: direct_incident, 2: direct_trans}
+        direct_workspace = SANSCalculateTransmissionTest._get_transmission_workspace(data=data_direct)
+
+        # Act
+        fitted_workspace, unfitted_workspace = SANSCalculateTransmissionTest._run_test(transmission_workspace,
+                                                                                       direct_workspace, state,
+                                                                                       is_sample=True)
+        # Assert
+        self._do_assert(transmission_workspace, direct_workspace, unfitted_workspace, fitted_workspace,
+                        trans_incident, trans_trans, direct_incident, direct_trans)
+
+    def test_that_calculates_transmission_for_monitor_specific_background_and_prompt_peak_for_can(self):
+        # Arrange
+        incident_spectrum = 1
+        transmission_spectrum = 3
+        fix_for_remove_bins = 1e-6
+        background_TOF_monitor_start = {str(incident_spectrum): 5000., str(transmission_spectrum): 5000.}
+        background_TOF_monitor_stop = {str(incident_spectrum): 10000., str(transmission_spectrum): 10000.}
+        state = SANSCalculateTransmissionTest._get_state(rebin_type=RebinType.Rebin, wavelength_low=2.,
+                                                         wavelength_high=8., wavelength_step=2.,
+                                                         wavelength_step_type=RangeStepType.Lin,
+                                                         prompt_peak_correction_min=15000. + fix_for_remove_bins,
+                                                         prompt_peak_correction_max=20000.,
+                                                         background_TOF_monitor_start=background_TOF_monitor_start,
+                                                         background_TOF_monitor_stop=background_TOF_monitor_stop,
+                                                         incident_monitor=incident_spectrum,
+                                                         transmission_monitor=transmission_spectrum,
+                                                         can_fit_type=FitType.Linear,
+                                                         can_polynomial_order=0, can_wavelength_low=2.,
+                                                         can_wavelength_high=8.)
+        # Get a test monitor workspace with 4 bins where the first bin is the background
+        trans_incident = [20., 220., 210000000., 220.]
+        trans_trans = [10., 80., 210000000., 80.]
+        direct_incident = [40., 401., 210000000., 401.]
+        direct_trans = [30., 320., 210000000., 320.]
+        data_transmission = {0: trans_incident, 2: trans_trans}
+        transmission_workspace = SANSCalculateTransmissionTest._get_transmission_workspace(data=data_transmission)
+        data_direct = {0: direct_incident, 2: direct_trans}
+        direct_workspace = SANSCalculateTransmissionTest._get_transmission_workspace(data=data_direct)
+
+        # Act
+        fitted_workspace, unfitted_workspace = SANSCalculateTransmissionTest._run_test(transmission_workspace,
+                                                                                       direct_workspace, state,
+                                                                                       is_sample=False)
+        # Assert
+        trans_incident[2] = trans_incident[3]
+        trans_trans[2] = trans_trans[3]
+        direct_incident[2] = direct_incident[3]
+        direct_trans[2] = direct_trans[3]
+
+        self._do_assert(transmission_workspace, direct_workspace, unfitted_workspace, fitted_workspace,
+                        trans_incident, trans_trans, direct_incident, direct_trans)
+
+    def test_that_can_get_transmission_for_region_of_interest_radius(self):
+        # This test picks the monitor detector ids based on a radius around the centre of the detector. This is much
+        # harder to test here; in principle the main tests for this behaviour belong with the actual
+        # CalculateTransmission algorithm.
+        # Arrange
+        state = SANSCalculateTransmissionTest._get_state(rebin_type=RebinType.Rebin, wavelength_low=2.,
+                                                         wavelength_high=8., wavelength_step=2.,
+                                                         wavelength_step_type=RangeStepType.Lin,
+                                                         background_TOF_general_start=5000.,
+                                                         background_TOF_general_stop=10000., incident_monitor=1,
+                                                         transmission_radius_on_detector=0.01,
+                                                         sample_fit_type=FitType.Linear,
+                                                         sample_polynomial_order=0, sample_wavelength_low=2.,
+                                                         sample_wavelength_high=8.)
+        # Gets the full workspace
+        transmission_workspace = SANSCalculateTransmissionTest._get_roi_workspace()
+        direct_workspace = SANSCalculateTransmissionTest._get_roi_workspace()
+
+        # Act
+        try:
+            fitted_workspace, unfitted_workspace = SANSCalculateTransmissionTest._run_test(transmission_workspace,
+                                                                                           direct_workspace, state,
+                                                                                           is_sample=True)
+            was_successful = True
+            self.assertTrue(fitted_workspace.getNumberHistograms() == 1)
+        except:  # noqa
+            was_successful = False
+
+        # Assert
+        self.assertTrue(was_successful)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCreateAdjustmentWorkspacesTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCreateAdjustmentWorkspacesTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5a4525464825225d6d78e2fae1e4a04f99a46f5
--- /dev/null
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCreateAdjustmentWorkspacesTest.py
@@ -0,0 +1,166 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+from sans.test_helper.test_director import TestDirector
+from sans.common.general_functions import create_unmanaged_algorithm
+from sans.common.constants import EMPTY_NAME
+from sans.common.enums import (DetectorType, DataType)
+
+
+class SANSCreateAdjustmentWorkspacesTest(unittest.TestCase):
+    sample_workspace = None
+    test_tof_min = 1000
+    test_tof_max = 10000
+    test_tof_width = 1000
+    test_wav_min = 1.
+    test_wav_max = 11.
+    test_wav_width = 2.
+
+    @staticmethod
+    def _get_state():
+        test_director = TestDirector()
+        return test_director.construct()
+
+    @staticmethod
+    def _get_sample_monitor_data(value):
+        create_name = "CreateSampleWorkspace"
+        name = "test_workspace"
+        create_options = {"OutputWorkspace": name,
+                          "NumBanks": 0,
+                          "NumMonitors": 8,
+                          "XMin": SANSCreateAdjustmentWorkspacesTest.test_tof_min,
+                          "XMax": SANSCreateAdjustmentWorkspacesTest.test_tof_max,
+                          "BinWidth": SANSCreateAdjustmentWorkspacesTest.test_tof_width}
+        create_alg = create_unmanaged_algorithm(create_name, **create_options)
+        create_alg.execute()
+        monitor_workspace = create_alg.getProperty("OutputWorkspace").value
+        for hist in range(monitor_workspace.getNumberHistograms()):
+            data_y = monitor_workspace.dataY(hist)
+            for index in range(len(data_y)):
+                data_y[index] = value
+            # This will be the background bin
+            data_y[0] = 0.1
+        return monitor_workspace
+
+    @staticmethod
+    def _get_sample_data():
+        create_name = "CreateSampleWorkspace"
+        name = "test_workspace"
+        create_options = {"OutputWorkspace": name,
+                          "NumBanks": 1,
+                          "NumMonitors": 1,
+                          "XMin": SANSCreateAdjustmentWorkspacesTest.test_wav_min,
+                          "XMax": SANSCreateAdjustmentWorkspacesTest.test_wav_max,
+                          "BinWidth": SANSCreateAdjustmentWorkspacesTest.test_wav_width,
+                          "XUnit": "Wavelength"}
+        create_alg = create_unmanaged_algorithm(create_name, **create_options)
+        create_alg.execute()
+        return create_alg.getProperty("OutputWorkspace").value
+
+    @staticmethod
+    def _load_workspace(file_name):
+        load_name = "Load"
+        load_options = {"OutputWorkspace": EMPTY_NAME,
+                        "Filename": file_name}
+        load_alg = create_unmanaged_algorithm(load_name, **load_options)
+        load_alg.execute()
+        return load_alg.getProperty("OutputWorkspace").value
+
+    @staticmethod
+    def _clone_workspace(workspace):
+        clone_name = "CloneWorkspace"
+        clone_options = {"InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME}
+        clone_alg = create_unmanaged_algorithm(clone_name, **clone_options)
+        clone_alg.execute()
+        return clone_alg.getProperty("OutputWorkspace").value
+
+    @staticmethod
+    def _rebin_workspace(workspace):
+        rebin_name = "Rebin"
+        rebin_options = {"InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME,
+                         "Params": "{0}, {1}, {2}".format(SANSCreateAdjustmentWorkspacesTest.test_tof_min,
+                                                          SANSCreateAdjustmentWorkspacesTest.test_tof_width,
+                                                          SANSCreateAdjustmentWorkspacesTest.test_tof_max)}
+        rebin_alg = create_unmanaged_algorithm(rebin_name, **rebin_options)
+        rebin_alg.execute()
+        return rebin_alg.getProperty("OutputWorkspace").value
+
+    @staticmethod
+    def _get_trans_type_data(value):
+        # Load the workspace
+        if SANSCreateAdjustmentWorkspacesTest.sample_workspace is None:
+            SANSCreateAdjustmentWorkspacesTest.sample_workspace = \
+                SANSCreateAdjustmentWorkspacesTest._load_workspace("SANS2D00022024")
+        # Clone the workspace
+        workspace = SANSCreateAdjustmentWorkspacesTest._clone_workspace(
+                                                                    SANSCreateAdjustmentWorkspacesTest.sample_workspace)
+        rebinned = SANSCreateAdjustmentWorkspacesTest._rebin_workspace(workspace)
+        # Set all entries to value
+        for hist in range(rebinned.getNumberHistograms()):
+            data_y = rebinned.dataY(hist)
+            for index in range(len(data_y)):
+                data_y[index] = value
+            # This will be the background bin
+            data_y[0] = 0.1
+        return rebinned
+
+    @staticmethod
+    def _run_test(state, sample_data, sample_monitor_data, transmission_data, direct_data, is_lab=True, is_sample=True):
+        adjustment_name = "SANSCreateAdjustmentWorkspaces"
+        adjustment_options = {"SANSState": state,
+                              "SampleData": sample_data,
+                              "MonitorWorkspace": sample_monitor_data,
+                              "TransmissionWorkspace": transmission_data,
+                              "DirectWorkspace": direct_data,
+                              "OutputWorkspaceWavelengthAdjustment": EMPTY_NAME,
+                              "OutputWorkspacePixelAdjustment": EMPTY_NAME,
+                              "OutputWorkspaceWavelengthAndPixelAdjustment": EMPTY_NAME}
+        if is_sample:
+            adjustment_options.update({"DataType": DataType.to_string(DataType.Sample)})
+        else:
+            adjustment_options.update({"DataType": DataType.to_string(DataType.Can)})
+        if is_lab:
+            adjustment_options.update({"Component": DetectorType.to_string(DetectorType.LAB)})
+        else:
+            adjustment_options.update({"Component": DetectorType.to_string(DetectorType.HAB)})
+
+        adjustment_alg = create_unmanaged_algorithm(adjustment_name, **adjustment_options)
+        adjustment_alg.execute()
+        wavelength_adjustment = adjustment_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
+        pixel_adjustment = adjustment_alg.getProperty("OutputWorkspacePixelAdjustment").value
+        wavelength_and_pixel_adjustment = adjustment_alg.getProperty(
+                                                            "OutputWorkspaceWavelengthAndPixelAdjustment").value
+        return wavelength_adjustment, pixel_adjustment, wavelength_and_pixel_adjustment
+
+    def test_that_adjustment_workspaces_are_produced_wavelength_and_wavelength_plus_pixel(self):
+        # Arrange
+        state = SANSCreateAdjustmentWorkspacesTest._get_state()
+        state.adjustment.wide_angle_correction = True
+        serialized_state = state.property_manager
+        sample_data = SANSCreateAdjustmentWorkspacesTest._get_sample_data()
+        sample_monitor_data = SANSCreateAdjustmentWorkspacesTest._get_sample_monitor_data(3.)
+        transmission_data = SANSCreateAdjustmentWorkspacesTest._get_trans_type_data(1.)
+        direct_data = SANSCreateAdjustmentWorkspacesTest._get_trans_type_data(2.)
+
+        # Act
+        try:
+            wavelength_adjustment, pixel_adjustment, wavelength_and_pixel_adjustment = \
+                SANSCreateAdjustmentWorkspacesTest._run_test(serialized_state, sample_data, sample_monitor_data,
+                                                             transmission_data, direct_data)
+            raised = False
+            # We expect a wavelength adjustment workspace
+            self.assertTrue(wavelength_adjustment)
+            # We don't expect a pixel adjustment workspace since no files were specified
+            self.assertFalse(pixel_adjustment)
+            # We expect a wavelength and pixel adjustment workspace since we set the flag to true and provided a
+            # sample data set
+            self.assertTrue(wavelength_and_pixel_adjustment)
+        except:  # noqa
+            raised = True
+        self.assertFalse(raised)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCreateWavelengthAndPixelAdjustmentTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCreateWavelengthAndPixelAdjustmentTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..47d70b8a2c807e8952d5b20a7276de82498ebfaf
--- /dev/null
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSCreateWavelengthAndPixelAdjustmentTest.py
@@ -0,0 +1,158 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+
+import os
+import numpy as np
+from sans.test_helper.test_director import TestDirector
+from sans.state.wavelength_and_pixel_adjustment import get_wavelength_and_pixel_adjustment_builder
+from sans.common.enums import (RebinType, RangeStepType, DetectorType)
+from sans.common.general_functions import (create_unmanaged_algorithm)
+from sans.common.constants import EMPTY_NAME
+
+
+class SANSCalculateTransmissionTest(unittest.TestCase):
+    @staticmethod
+    def _create_test_wavelength_adjustment_file(file_name):
+        test_file = ("  Tue 24-MAR-2015 00:02 Workspace: directbeam_new_hist\n"
+                     "\n"
+                     "  6    0    0    0    1  6    0\n"
+                     "         0         0         0         0\n"
+                     " 3 (F12.5,2E16.6)\n"
+                     "     1.00000    5.000000e-01    5.000000e-01\n"
+                     "     3.00000    5.000000e-01    5.000000e-01\n"
+                     "     5.00000    5.000000e-01    5.000000e-01\n"
+                     "     7.00000    5.000000e-01    5.000000e-01\n"
+                     "     9.00000    5.000000e-01    5.000000e-01\n"
+                     "    11.00000    5.000000e-01    5.000000e-01\n")
+
+        full_file_path = os.path.join(mantid.config.getString('defaultsave.directory'), file_name)
+        if os.path.exists(full_file_path):
+            os.remove(full_file_path)
+
+        with open(full_file_path, 'w') as f:
+            f.write(test_file)
+        return full_file_path
+
+    @staticmethod
+    def _remove_test_file(file_name):
+        if os.path.exists(file_name):
+            os.remove(file_name)
+
+    @staticmethod
+    def _get_state(lab_pixel_file=None, hab_pixel_file=None, lab_wavelength_file=None, hab_wavelength_file=None,
+                   wavelength_low=None, wavelength_high=None, wavelength_step=None,
+                   wavelength_step_type=None):
+        test_director = TestDirector()
+        state = test_director.construct()
+        data_state = state.data
+        wavelength_and_pixel_builder = get_wavelength_and_pixel_adjustment_builder(data_state)
+        if lab_pixel_file:
+            wavelength_and_pixel_builder.set_LAB_pixel_adjustment_file(lab_pixel_file)
+        if hab_pixel_file:
+            wavelength_and_pixel_builder.set_HAB_pixel_adjustment_file(hab_pixel_file)
+        if lab_wavelength_file:
+            wavelength_and_pixel_builder.set_LAB_wavelength_adjustment_file(lab_wavelength_file)
+        if hab_wavelength_file:
+            wavelength_and_pixel_builder.set_HAB_wavelength_adjustment_file(hab_wavelength_file)
+        if wavelength_step_type:
+            wavelength_and_pixel_builder.set_wavelength_step_type(wavelength_step_type)
+        if wavelength_low:
+            wavelength_and_pixel_builder.set_wavelength_low(wavelength_low)
+        if wavelength_high:
+            wavelength_and_pixel_builder.set_wavelength_high(wavelength_high)
+        if wavelength_step:
+            wavelength_and_pixel_builder.set_wavelength_step(wavelength_step)
+        wavelength_and_pixel_state = wavelength_and_pixel_builder.build()
+        state.adjustment.wavelength_and_pixel_adjustment = wavelength_and_pixel_state
+        return state.property_manager
+
+    @staticmethod
+    def _get_workspace(data):
+        create_name = "CreateSampleWorkspace"
+        create_options = {"NumBanks": 1,
+                          "BankPixelWidth": 1,
+                          "XMin": 1,
+                          "XMax": 11,
+                          "BinWidth": 2,
+                          "XUnit": "Wavelength",
+                          "OutputWorkspace": EMPTY_NAME}
+        create_alg = create_unmanaged_algorithm(create_name, **create_options)
+        create_alg.execute()
+        workspace = create_alg.getProperty("OutputWorkspace").value
+        data_y = workspace.dataY(0)
+        for index in range(len(data_y)):
+            data_y[index] = data[index]
+        return workspace
+
+    @staticmethod
+    def _run_test(transmission_workspace, norm_workspace, state, is_lab=True):
+        adjust_name = "SANSCreateWavelengthAndPixelAdjustment"
+        adjust_options = {"TransmissionWorkspace": transmission_workspace,
+                          "NormalizeToMonitorWorkspace": norm_workspace,
+                          "SANSState": state,
+                          "OutputWorkspaceWavelengthAdjustment": "out_wavelength",
+                          "OutputWorkspacePixelAdjustment": "out_pixels"}
+        if is_lab:
+            adjust_options.update({"Component": DetectorType.to_string(DetectorType.LAB)})
+        else:
+            adjust_options.update({"Component": DetectorType.to_string(DetectorType.HAB)})
+        adjust_alg = create_unmanaged_algorithm(adjust_name, **adjust_options)
+        adjust_alg.execute()
+        wavelength_adjustment = adjust_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
+        pixel_adjustment = adjust_alg.getProperty("OutputWorkspacePixelAdjustment").value
+        return wavelength_adjustment, pixel_adjustment
+
+    def test_that_gets_wavelength_workspace_when_no_files_are_specified(self):
+        # Arrange
+        data_trans = [3., 4., 5., 7., 3.]
+        data_norm = [9., 3., 8., 3., 1.]
+        transmission_workspace = SANSCalculateTransmissionTest._get_workspace(data_trans)
+        norm_workspace = SANSCalculateTransmissionTest._get_workspace(data_norm)
+
+        state = SANSCalculateTransmissionTest._get_state(wavelength_low=1., wavelength_high=11., wavelength_step=2.,
+                                                         wavelength_step_type=RangeStepType.Lin)
+
+        # Act
+        wavelength_adjustment, pixel_adjustment = SANSCalculateTransmissionTest._run_test(transmission_workspace,
+                                                                                          norm_workspace, state, True)
+        # Assert
+        self.assertTrue(pixel_adjustment is None)
+        self.assertTrue(wavelength_adjustment.getNumberHistograms() == 1)
+        expected = np.array(data_trans)*np.array(data_norm)
+        data_y = wavelength_adjustment.dataY(0)
+        for e1, e2 in zip(expected, data_y):
+            self.assertTrue(e1 == e2)
+
+    def test_that_gets_adjustment_workspace_if_files_are_specified(self):
+        # Arrange
+
+        data_trans = [3., 4., 5., 7., 3.]
+        data_norm = [9., 3., 8., 3., 1.]
+        expected_direct_file_workspace = [0.5, 0.5, 0.5, 0.5, 0.5]
+        transmission_workspace = SANSCalculateTransmissionTest._get_workspace(data_trans)
+        norm_workspace = SANSCalculateTransmissionTest._get_workspace(data_norm)
+
+        direct_file_name = "DIRECT_test.txt"
+        direct_file_name = SANSCalculateTransmissionTest._create_test_wavelength_adjustment_file(direct_file_name)
+
+        state = SANSCalculateTransmissionTest._get_state(hab_wavelength_file=direct_file_name,
+                                                         wavelength_low=1., wavelength_high=11., wavelength_step=2.,
+                                                         wavelength_step_type=RangeStepType.Lin)
+        # Act
+        wavelength_adjustment, pixel_adjustment = SANSCalculateTransmissionTest._run_test(transmission_workspace,
+                                                                                          norm_workspace, state, False)
+        # Assert
+        self.assertTrue(pixel_adjustment is None)
+        self.assertTrue(wavelength_adjustment.getNumberHistograms() == 1)
+        expected = np.array(data_trans)*np.array(data_norm)*np.array(expected_direct_file_workspace)
+        data_y = wavelength_adjustment.dataY(0)
+        for e1, e2 in zip(expected, data_y):
+            self.assertTrue(e1 == e2)
+
+        # Clean up
+        SANSCalculateTransmissionTest._remove_test_file(direct_file_name)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSNormalizeToMonitorTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSNormalizeToMonitorTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f176c9933d975d1d88b6ed8c5cd62061da32d19
--- /dev/null
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/sans/SANSNormalizeToMonitorTest.py
@@ -0,0 +1,204 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+
+from sans.test_helper.test_director import TestDirector
+from sans.state.normalize_to_monitor import get_normalize_to_monitor_builder
+from sans.common.enums import (RebinType, RangeStepType)
+from sans.common.general_functions import (create_unmanaged_algorithm)
+from sans.common.constants import EMPTY_NAME
+
+
+def get_expected_for_spectrum_1_case(monitor_workspace, selected_detector):
+    # Expected output.
+    # 1. The background correction should produce [0., 90., 90., 90.]
+    # 2. Conversion to wavelength changes the x axis to approximately [1.6, 3.2, 4.7, 6.3, 7.9]
+    # 3. Rebinning creates the x steps [2, 4, 6, 8]
+    # The first bin should have 0 + abs(3.2-4.0)/abs(3.2 - 4.7)*90
+    # The second bin should have abs(4.0-4.7)/abs(3.2 - 4.7)*90 + abs(4.7-6.0)/abs(4.7 - 6.3)*90
+    # The third bin should have abs(6.0-6.3)/abs(4.7 - 6.3)*90 + 90
+    instrument = monitor_workspace.getInstrument()
+    source = instrument.getSource()
+    detector = monitor_workspace.getDetector(selected_detector)
+    distance_source_detector = detector.getDistance(source)
+    h = 6.62606896e-34
+    mass = 1.674927211e-27
+    times = monitor_workspace.dataX(0)
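+    # Convert TOF (microseconds) to wavelength in Angstrom using the de Broglie
+    # relation lambda = h * t / (m_n * L), where L is the source-detector distance.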
+    lambda_after_unit_conversion = [(time * 1e-6) * h / distance_source_detector / mass * 1e10 for time in times]
+    expected_lambda = [2., 4., 6., 8.]
+    expected_signal = [abs(lambda_after_unit_conversion[1] - expected_lambda[1]) /
+                       abs(lambda_after_unit_conversion[1] - lambda_after_unit_conversion[2]) * 90,
+                       abs(lambda_after_unit_conversion[2] - expected_lambda[1]) /
+                       abs(lambda_after_unit_conversion[1] - lambda_after_unit_conversion[2]) * 90 +
+                       abs(lambda_after_unit_conversion[2] - expected_lambda[2]) /
+                       abs(lambda_after_unit_conversion[2] - lambda_after_unit_conversion[3]) * 90,
+                       abs(lambda_after_unit_conversion[3] - expected_lambda[2]) /
+                       abs(lambda_after_unit_conversion[2] - lambda_after_unit_conversion[3]) * 90 + 90]
+    return expected_lambda, expected_signal
+
+
+class SANSNormalizeToMonitorTest(unittest.TestCase):
+
+    @staticmethod
+    def _get_monitor_workspace(data=None):
+        create_name = "CreateSampleWorkspace"
+        name = "test_workspace"
+        create_options = {"OutputWorkspace": name,
+                          "NumBanks": 0,
+                          "NumMonitors": 8}
+        create_alg = create_unmanaged_algorithm(create_name, **create_options)
+        create_alg.execute()
+        ws = create_alg.getProperty("OutputWorkspace").value
+        ws = SANSNormalizeToMonitorTest._prepare_workspace(ws, data=data)
+        return ws
+
+    @staticmethod
+    def _get_state(background_TOF_general_start=None, background_TOF_general_stop=None,
+                   background_TOF_monitor_start=None, background_TOF_monitor_stop=None, incident_monitor=None,
+                   prompt_peak_correction_min=None, prompt_peak_correction_max=None):
+        test_director = TestDirector()
+        state = test_director.construct()
+
+        data_state = state.data
+        normalize_to_monitor_builder = get_normalize_to_monitor_builder(data_state)
+        normalize_to_monitor_builder.set_rebin_type(RebinType.Rebin)
+        normalize_to_monitor_builder.set_wavelength_low(2.)
+        normalize_to_monitor_builder.set_wavelength_high(8.)
+        normalize_to_monitor_builder.set_wavelength_step(2.)
+        normalize_to_monitor_builder.set_wavelength_step_type(RangeStepType.Lin)
+        if background_TOF_general_start:
+            normalize_to_monitor_builder.set_background_TOF_general_start(background_TOF_general_start)
+        if background_TOF_general_stop:
+            normalize_to_monitor_builder.set_background_TOF_general_stop(background_TOF_general_stop)
+        if background_TOF_monitor_start:
+            normalize_to_monitor_builder.set_background_TOF_monitor_start(background_TOF_monitor_start)
+        if background_TOF_monitor_stop:
+            normalize_to_monitor_builder.set_background_TOF_monitor_stop(background_TOF_monitor_stop)
+        if incident_monitor:
+            normalize_to_monitor_builder.set_incident_monitor(incident_monitor)
+        if prompt_peak_correction_min:
+            normalize_to_monitor_builder.set_prompt_peak_correction_min(prompt_peak_correction_min)
+        if prompt_peak_correction_max:
+            normalize_to_monitor_builder.set_prompt_peak_correction_max(prompt_peak_correction_max)
+        if prompt_peak_correction_min and prompt_peak_correction_max:
+            normalize_to_monitor_builder.set_prompt_peak_correction_enabled(True)
+
+        normalize_to_monitor_state = normalize_to_monitor_builder.build()
+        state.adjustment.normalize_to_monitor = normalize_to_monitor_state
+
+        return state.property_manager
+
+    @staticmethod
+    def _prepare_workspace(workspace, data=None):
+        """
+        Creates a test monitor workspace with 4 bins
+        """
+        # Rebin the workspace
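+        # Params "5000,5000,25000" gives four TOF bins of width 5000 between
+        # 5000 and 25000 microseconds.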
+        rebin_name = "Rebin"
+        rebin_options = {"InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME,
+                         "Params": "5000,5000,25000"}
+        rebin_alg = create_unmanaged_algorithm(rebin_name, **rebin_options)
+        rebin_alg.execute()
+        rebinned = rebin_alg.getProperty("OutputWorkspace").value
+
+        # Now set specified monitors to specified values
+        if data is not None:
+            for key, value in list(data.items()):
+                data_y = rebinned.dataY(key)
+                for index in range(len(data_y)):
+                    data_y[index] = value[index]
+
+        return rebinned
+
+    @staticmethod
+    def _run_test(workspace, state):
+        normalize_name = "SANSNormalizeToMonitor"
+        normalize_options = {"InputWorkspace": workspace,
+                             "OutputWorkspace": EMPTY_NAME,
+                             "SANSState": state}
+        normalize_alg = create_unmanaged_algorithm(normalize_name, **normalize_options)
+        normalize_alg.execute()
+        return normalize_alg.getProperty("OutputWorkspace").value
+
+    def _do_assert(self, workspace, expected_monitor_spectrum, expected_lambda, expected_signal):
+        # Check the units
+        axis = workspace.getAxis(0)
+        unit = axis.getUnit()
+        self.assertTrue(unit.unitID() == "Wavelength")
+
+        # Check the spectrum
+        self.assertTrue(len(workspace.dataY(0)) == 3)
+        self.assertTrue(workspace.getNumberHistograms() == 1)
+        single_spectrum = workspace.getSpectrum(0)
+        self.assertTrue(single_spectrum.getSpectrumNo() == expected_monitor_spectrum)
+
+        # Check the values
+        tolerance = 1e-8
+        for e1, e2 in zip(workspace.dataX(0), expected_lambda):
+            self.assertTrue(abs(e1 - e2) < tolerance)
+        for e1, e2 in zip(workspace.dataY(0), expected_signal):
+            self.assertTrue(abs(e1 - e2) < tolerance)
+
+    def test_that_gets_normalization_for_general_background_and_no_prompt_peak(self):
+        # Arrange
+        incident_spectrum = 1
+        state = SANSNormalizeToMonitorTest._get_state(background_TOF_general_start=5000.,
+                                                      background_TOF_general_stop=10000.,
+                                                      incident_monitor=incident_spectrum)
+        # Get a test monitor workspace with 4 bins where the first bin is the background
+        data = {0: [10., 100., 100., 100.]}
+        monitor_workspace = SANSNormalizeToMonitorTest._get_monitor_workspace(data=data)
+        # Act
+        workspace = SANSNormalizeToMonitorTest._run_test(monitor_workspace, state)
+
+        # Assert
+        expected_lambda, expected_signal = get_expected_for_spectrum_1_case(monitor_workspace,
+                                                                            selected_detector=incident_spectrum-1)
+        self._do_assert(workspace, incident_spectrum, expected_lambda, expected_signal)
+
+    def test_that_gets_normalization_for_specific_monitor_background_and_no_prompt_peak(self):
+        # Arrange
+        incident_spectrum = 1
+        background_TOF_monitor_start = {str(incident_spectrum): 5000.}
+        background_TOF_monitor_stop = {str(incident_spectrum): 10000.}
+        state = SANSNormalizeToMonitorTest._get_state(background_TOF_monitor_start=background_TOF_monitor_start,
+                                                      background_TOF_monitor_stop=background_TOF_monitor_stop,
+                                                      incident_monitor=incident_spectrum)
+        # Get a test monitor workspace with 4 bins where the first bin is the background
+        data = {0: [10., 100., 100., 100.]}
+        monitor_workspace = SANSNormalizeToMonitorTest._get_monitor_workspace(data=data)
+        # Act
+        workspace = SANSNormalizeToMonitorTest._run_test(monitor_workspace, state)
+
+        # Assert
+        expected_lambda, expected_signal = get_expected_for_spectrum_1_case(monitor_workspace,
+                                                                            selected_detector=incident_spectrum-1)
+        self._do_assert(workspace, incident_spectrum, expected_lambda, expected_signal)
+
+    def test_that_gets_normalization_for_general_background_and_prompt_peak(self):
+        # Arrange
+        incident_spectrum = 1
+        # RemoveBins appears to have an issue when xmin or xmax falls exactly on a bin boundary.
+        # Offsetting the value slightly is a quick workaround.
+        fix_for_remove_bins = 1e-6
+        state = SANSNormalizeToMonitorTest._get_state(background_TOF_general_start=5000.,
+                                                      background_TOF_general_stop=10000.,
+                                                      prompt_peak_correction_min=15000. + fix_for_remove_bins,
+                                                      prompt_peak_correction_max=20000.,
+                                                      incident_monitor=incident_spectrum)
+        # Get a test monitor workspace with 4 bins where the first bin is the background and the third bin has
+        # a prompt peak which will be removed
+        data = {0: [10., 100., 1000000., 100.]}
+        monitor_workspace = SANSNormalizeToMonitorTest._get_monitor_workspace(data=data)
+        # Act
+        workspace = SANSNormalizeToMonitorTest._run_test(monitor_workspace, state)
+
+        # Assert
+        expected_lambda, expected_signal = get_expected_for_spectrum_1_case(monitor_workspace,
+                                                                            selected_detector=incident_spectrum-1)
+        self._do_assert(workspace, incident_spectrum, expected_lambda, expected_signal)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Framework/PythonInterface/test/python/plugins/functions/AttributeTest.py b/Framework/PythonInterface/test/python/plugins/functions/AttributeTest.py
index fa6a404d3f022f62301568341b7b0dcd436027a0..215175e9043f53d28cce5ba7d0e4e6fa153ea957 100644
--- a/Framework/PythonInterface/test/python/plugins/functions/AttributeTest.py
+++ b/Framework/PythonInterface/test/python/plugins/functions/AttributeTest.py
@@ -20,7 +20,6 @@ class AttributeExample(IFunction1D):
                     self._freq = value
             if name == "Sine":
                     self._sine = value
-            self.storeAttributeValue(name, value)
 
         def function1D(self,xvals):
             ampl=self.getParameterValue("Amplitude")
diff --git a/Framework/TestHelpers/inc/MantidTestHelpers/FunctionCreationHelper.h b/Framework/TestHelpers/inc/MantidTestHelpers/FunctionCreationHelper.h
new file mode 100644
index 0000000000000000000000000000000000000000..c1370c265f1dbcc88b15427b5be2252f19c48ffd
--- /dev/null
+++ b/Framework/TestHelpers/inc/MantidTestHelpers/FunctionCreationHelper.h
@@ -0,0 +1,31 @@
+#ifndef MANTID_TESTHELPERS_FUNCTIONCREATIONHELPER_H_
+#define MANTID_TESTHELPERS_FUNCTIONCREATIONHELPER_H_
+
+#include "MantidAPI/IFunction1D.h"
+#include "MantidAPI/ParamFunction.h"
+
+namespace Mantid {
+
+namespace TestHelpers {
+
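+/// A test helper function that declares one extra parameter at the end of
+/// each iteration, up to a maximum of m_maxNParams.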
+class FunctionChangesNParams : public Mantid::API::IFunction1D,
+                               public Mantid::API::ParamFunction {
+public:
+  FunctionChangesNParams();
+  std::string name() const override;
+  void iterationStarting() override;
+  void iterationFinished() override;
+
+protected:
+  void function1D(double *out, const double *xValues,
+                  const size_t nData) const override;
+  void functionDeriv1D(Mantid::API::Jacobian *out, const double *xValues,
+                       const size_t nData) override;
+  size_t m_maxNParams = 5;
+  bool m_canChange = false;
+};
+
+} // namespace TestHelpers
+} // namespace Mantid
+
+#endif // MANTID_TESTHELPERS_FUNCTIONCREATIONHELPER_H_
diff --git a/Framework/TestHelpers/inc/MantidTestHelpers/WorkspaceCreationHelper.h b/Framework/TestHelpers/inc/MantidTestHelpers/WorkspaceCreationHelper.h
index c8ada29dfd6dafd424a41f6edacab386d6f5081d..a9d3f7f7fa1b51482746cce2a8e270067ba4232b 100644
--- a/Framework/TestHelpers/inc/MantidTestHelpers/WorkspaceCreationHelper.h
+++ b/Framework/TestHelpers/inc/MantidTestHelpers/WorkspaceCreationHelper.h
@@ -372,7 +372,8 @@ create2DWorkspaceWithReflectometryInstrument(double startX = 0);
 /// Create a 2D workspace with one monitor and three detectors based around
 /// a virtual reflectometry instrument.
 Mantid::API::MatrixWorkspace_sptr
-create2DWorkspaceWithReflectometryInstrumentMultiDetector(double startX = 0);
+create2DWorkspaceWithReflectometryInstrumentMultiDetector(
+    double startX = 0, const double detSize = 0.0);
 
 void createInstrumentForWorkspaceWithDistances(
     Mantid::API::MatrixWorkspace_sptr workspace,
diff --git a/Framework/TestHelpers/src/FunctionCreationHelper.cpp b/Framework/TestHelpers/src/FunctionCreationHelper.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d68907a9fc124ae40c0559eb4d963c8c6d76bb2c
--- /dev/null
+++ b/Framework/TestHelpers/src/FunctionCreationHelper.cpp
@@ -0,0 +1,60 @@
+#include "MantidTestHelpers/FunctionCreationHelper.h"
+#include "MantidKernel/Exception.h"
+
+namespace Mantid {
+namespace TestHelpers {
+
+std::string FunctionChangesNParams::name() const {
+  return "FunctionChangesNParams";
+}
+
+FunctionChangesNParams::FunctionChangesNParams()
+    : Mantid::API::IFunction1D(), Mantid::API::ParamFunction() {
+  this->declareParameter("A0", 0.0);
+}
+
+void FunctionChangesNParams::iterationStarting() { m_canChange = true; }
+
+void FunctionChangesNParams::iterationFinished() {
+  auto np = nParams();
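+  // Grow the function by one parameter per iteration, up to m_maxNParams,
+  // and throw FitSizeWarning to signal that the parameter count has changed.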
+  if (m_canChange && np < m_maxNParams) {
+    declareParameter("A" + std::to_string(np), 0.0);
+    throw Mantid::Kernel::Exception::FitSizeWarning(np, nParams());
+  }
+  m_canChange = false;
+}
+
+void FunctionChangesNParams::function1D(double *out, const double *xValues,
+                                        const size_t nData) const {
+  auto np = nParams();
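+  // Evaluate the polynomial A0 + A1*x + ... + A_{np-1}*x^{np-1} using
+  // Horner's scheme, starting from the highest-order coefficient.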
+  for (size_t i = 0; i < nData; ++i) {
+    double x = xValues[i];
+    double y = getParameter(np - 1);
+    if (np > 1) {
+      for (size_t ip = np - 1; ip > 0; --ip) {
+        y = getParameter(ip - 1) + x * y;
+      }
+    }
+    out[i] = y;
+  }
+}
+
+void FunctionChangesNParams::functionDeriv1D(Mantid::API::Jacobian *out,
+                                             const double *xValues,
+                                             const size_t nData) {
+  auto np = nParams();
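+  // The partial derivative with respect to parameter A_k is x^k, so each
+  // column k of the Jacobian is filled with the corresponding power of x.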
+  for (size_t i = 0; i < nData; ++i) {
+    double x = xValues[i];
+    double y = 1.0;
+    out->set(i, 0, y);
+    if (np > 1) {
+      for (size_t ip = 1; ip < np; ++ip) {
+        y = x * y;
+        out->set(i, ip, y);
+      }
+    }
+  }
+}
+
+} // namespace TestHelpers
+} // namespace Mantid
diff --git a/Framework/TestHelpers/src/WorkspaceCreationHelper.cpp b/Framework/TestHelpers/src/WorkspaceCreationHelper.cpp
index 32887532c4c9330edad99480b96fb126682b577c..7fedb9644cc90949d3aca1d1eaa7cb4cbf91c04d 100644
--- a/Framework/TestHelpers/src/WorkspaceCreationHelper.cpp
+++ b/Framework/TestHelpers/src/WorkspaceCreationHelper.cpp
@@ -526,9 +526,11 @@ create2DWorkspaceWithReflectometryInstrument(double startX) {
 * multiple detectors
 * @return workspace with instrument attached.
 * @param startX : X Tof start value for the workspace.
+* @param detSize : optional detector height (default is 0 which puts all
+* detectors at the same position)
 */
-MatrixWorkspace_sptr
-create2DWorkspaceWithReflectometryInstrumentMultiDetector(double startX) {
+MatrixWorkspace_sptr create2DWorkspaceWithReflectometryInstrumentMultiDetector(
+    double startX, const double detSize) {
   Instrument_sptr instrument = boost::make_shared<Instrument>();
   instrument->setReferenceFrame(
       boost::make_shared<ReferenceFrame>(Y /*up*/, X /*along*/, Left, "0,0,0"));
@@ -548,24 +550,29 @@ create2DWorkspaceWithReflectometryInstrumentMultiDetector(double startX) {
   instrument->add(monitor);
   instrument->markAsMonitor(monitor);
 
+  // Place the central detector at 45 degrees (i.e. the distance
+  // from the sample in Y is the same as the distance in X).
+  const double detPosX = 20;
+  const double detPosY = detPosX - sample->getPos().X();
+
   Detector *det1 = new Detector(
       "point-detector", 2,
       ComponentCreationHelper::createCuboid(0.01, 0.02, 0.03), nullptr);
-  det1->setPos(20, (20 - sample->getPos().X()), 0);
+  det1->setPos(detPosX, detPosY - detSize, 0); // offset below centre
   instrument->add(det1);
   instrument->markAsDetector(det1);
 
   Detector *det2 = new Detector(
       "point-detector", 3,
       ComponentCreationHelper::createCuboid(0.01, 0.02, 0.03), nullptr);
-  det2->setPos(20, (20 - sample->getPos().X()), 0);
+  det2->setPos(detPosX, detPosY, 0); // at centre
   instrument->add(det2);
   instrument->markAsDetector(det2);
 
   Detector *det3 = new Detector(
       "point-detector", 4,
       ComponentCreationHelper::createCuboid(0.01, 0.02, 0.03), nullptr);
-  det3->setPos(20, (20 - sample->getPos().X()), 0);
+  det3->setPos(detPosX, detPosY + detSize, 0); // offset above centre
   instrument->add(det3);
   instrument->markAsDetector(det3);
 
diff --git a/MantidPlot/CMakeLists.txt b/MantidPlot/CMakeLists.txt
index 7f99f3cba60f13bd026062d0f9daace500b09110..a4bdc8cff070161e35797034f2b194dd7c269268 100644
--- a/MantidPlot/CMakeLists.txt
+++ b/MantidPlot/CMakeLists.txt
@@ -183,9 +183,10 @@ set ( MANTID_SRCS  src/Mantid/AlgorithmDockWidget.cpp
                    src/Mantid/MantidMDCurve.cpp
                    src/Mantid/MantidMDCurveDialog.cpp
                    src/Mantid/MantidMatrixDialog.cpp
+                   src/Mantid/MantidPlotUtilities.cpp
                    src/Mantid/MantidSampleLogDialog.cpp
                    src/Mantid/MantidSampleMaterialDialog.cpp
-				   src/Mantid/MantidGroupPlotGenerator.cpp
+                   src/Mantid/MantidSurfaceContourPlotGenerator.cpp
                    src/Mantid/MantidUI.cpp
                    src/Mantid/MantidTable.cpp
                    src/Mantid/PeakPickerTool.cpp
@@ -375,9 +376,10 @@ set ( MANTID_HDRS  src/Mantid/AlgorithmMonitor.h
                    src/Mantid/MantidMatrixDialog.h
                    src/Mantid/MantidMatrix.h
                    src/Mantid/MantidMatrixFunction.h
+                   src/Mantid/MantidPlotUtilities.h
                    src/Mantid/MantidSampleLogDialog.h
                    src/Mantid/MantidSampleMaterialDialog.h
-				   src/Mantid/MantidGroupPlotGenerator.h
+                   src/Mantid/MantidSurfaceContourPlotGenerator.h
                    src/Mantid/MantidUI.h
                    src/Mantid/MantidTable.h
                    src/Mantid/PeakPickerTool.h
diff --git a/MantidPlot/src/ApplicationWindow.cpp b/MantidPlot/src/ApplicationWindow.cpp
index e04dcb5b389be8c1935bebf389a044fb94893fd6..7e25c17a310391517283cabd4359233bd320e6c4 100644
--- a/MantidPlot/src/ApplicationWindow.cpp
+++ b/MantidPlot/src/ApplicationWindow.cpp
@@ -6051,7 +6051,7 @@ bool ApplicationWindow::saveProject(bool compress) {
   return true;
 }
 
-void ApplicationWindow::prepareSaveProject() {
+int ApplicationWindow::execSaveProjectDialog() {
   std::vector<IProjectSerialisable *> windows;
 
   for (auto window : getSerialisableWindows()) {
@@ -6071,9 +6071,11 @@ void ApplicationWindow::prepareSaveProject() {
       projectname, *serialiser, windows, this);
   connect(m_projectSaveView, SIGNAL(projectSaved()), this,
           SLOT(postSaveProject()));
-  m_projectSaveView->show();
+  return m_projectSaveView->exec();
 }
 
+void ApplicationWindow::prepareSaveProject() { execSaveProjectDialog(); }
+
 /**
  * The project was just saved. Update the main window.
  */
@@ -9175,32 +9177,6 @@ void ApplicationWindow::closeWindow(MdiSubWindow *window) {
   emit modified();
 }
 
-/**
- * Called when the user choses to close the program
- */
-void ApplicationWindow::prepareToCloseMantid() {
-  if (!saved) {
-    QString savemsg =
-        tr("Save changes to project: <p><b> %1 </b> ?").arg(projectname);
-    int result =
-        QMessageBox::information(this, tr("MantidPlot"), savemsg, tr("Yes"),
-                                 tr("No"), tr("Cancel"), 0, 2);
-    if (result == 0) {
-      prepareSaveProject();
-      // When we're finished saving trigger the close event
-      connect(m_projectSaveView, SIGNAL(finished(int)), qApp,
-              SLOT(closeAllWindows()));
-      return;
-    } else if (result == 2) {
-      // User wanted to cancel, do nothing
-      return;
-    }
-  }
-
-  // Call to close all the windows and shutdown Mantid
-  QApplication::closeAllWindows();
-}
-
 /** Add a serialisable window to the application
  * @param window :: the window to add
  */
@@ -9809,6 +9785,25 @@ void ApplicationWindow::closeEvent(QCloseEvent *ce) {
     // script is running.
   }
 
+  if (!saved) {
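+    // Ask whether to save unsaved changes; abort the close if the user
+    // cancels either the prompt or the project-save dialog.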
+    QString savemsg =
+        tr("Save changes to project: <p><b> %1 </b> ?").arg(projectname);
+    int result =
+        QMessageBox::information(this, tr("MantidPlot"), savemsg, tr("Yes"),
+                                 tr("No"), tr("Cancel"), 0, 2);
+    if (result == 0) {
+      auto response = execSaveProjectDialog();
+      if (response != QDialog::Accepted) {
+        ce->ignore();
+        return;
+      }
+    } else if (result == 2) {
+      // User wanted to cancel, do nothing
+      ce->ignore();
+      return;
+    }
+  }
+
   // Close the remaining MDI windows. The Python API is required to be active
   // when the MDI window destructor is called so that those references can be
   // cleaned up meaning we cannot rely on the deleteLater functionality to
@@ -9851,6 +9846,7 @@ void ApplicationWindow::closeEvent(QCloseEvent *ce) {
   scriptingEnv()->finalize();
 
   ce->accept();
+  qApp->closeAllWindows();
 }
 
 void ApplicationWindow::customEvent(QEvent *e) {
@@ -11821,8 +11817,7 @@ void ApplicationWindow::createActions() {
   actionCloseAllWindows = new MantidQt::MantidWidgets::TrackedAction(
       QIcon(getQPixmap("quit_xpm")), tr("&Quit"), this);
   actionCloseAllWindows->setShortcut(tr("Ctrl+Q"));
-  connect(actionCloseAllWindows, SIGNAL(triggered()), this,
-          SLOT(prepareToCloseMantid()));
+  connect(actionCloseAllWindows, SIGNAL(triggered()), this, SLOT(close()));
 
   actionDeleteFitTables = new MantidQt::MantidWidgets::TrackedAction(
       QIcon(getQPixmap("close_xpm")), tr("Delete &Fit Tables"), this);
diff --git a/MantidPlot/src/ApplicationWindow.h b/MantidPlot/src/ApplicationWindow.h
index b1a274f27167744bc8b52030723fbb7775f4f6a6..6cacd88828e1dc32670559761869eaad20147d62 100644
--- a/MantidPlot/src/ApplicationWindow.h
+++ b/MantidPlot/src/ApplicationWindow.h
@@ -270,6 +270,8 @@ public slots:
   void saveProjectAs(const QString &fileName = QString(),
                      bool compress = false);
   bool saveProject(bool compress = false);
+  /// Run the project saver dialog
+  int execSaveProjectDialog();
   /// Show the project saver dialog
   void prepareSaveProject();
   /// Update application window post save
@@ -603,7 +605,6 @@ public slots:
   void closeActiveWindow();
   void closeSimilarWindows();
   void closeWindow(MdiSubWindow *window);
-  void prepareToCloseMantid();
 
   //!  Does all the cleaning work before actually deleting a window!
   void removeWindowFromLists(MdiSubWindow *w);
diff --git a/MantidPlot/src/ConfigDialog.cpp b/MantidPlot/src/ConfigDialog.cpp
index 2485ea1e675585115a7ab6e250d72e11969c53c7..c4dbcad0629136a862b349fe03caaa4fc5db1e29 100644
--- a/MantidPlot/src/ConfigDialog.cpp
+++ b/MantidPlot/src/ConfigDialog.cpp
@@ -854,6 +854,15 @@ void ConfigDialog::initMdPlottingVsiTab() {
   grid->addWidget(lblVsiDefaultBackground, 1, 0);
   grid->addWidget(vsiDefaultBackground, 1, 1);
 
+  // Axes Color
+  vsiAxesColor = new QGroupBox();
+  vsiAxesColor->setCheckable(true);
+  vsiAxesColor->setChecked(m_mdSettings.getUserSettingAutoColorAxes());
+  vsiAxesColor->setTitle(tr("Automatic axes color selection"));
+  vsiAxesColor->setToolTip(
+      tr("Automatically select a contrasting color for all axes"));
+  vsiTabLayout->addWidget(vsiAxesColor);
+
   const QColor backgroundColor = m_mdSettings.getUserSettingBackgroundColor();
   vsiDefaultBackground->setColor(backgroundColor);
 
@@ -2723,6 +2732,10 @@ void ConfigDialog::updateMdPlottingSettings() {
     m_mdSettings.setUserSettingColorMap(vsiDefaultColorMap->currentText());
   }
 
+  if (vsiAxesColor) {
+    m_mdSettings.setUserSettingAutoColorAxes(vsiAxesColor->isChecked());
+  }
+
   // Read if the usage of the last color map and background color should be
   // performed
   if (mdPlottingVsiFrameBottom->isChecked()) {
diff --git a/MantidPlot/src/ConfigDialog.h b/MantidPlot/src/ConfigDialog.h
index e795145e2acd681c176c82cdc12d41f23707524f..8e41e3434007aaa92de55a0358760c72e1bf8042 100644
--- a/MantidPlot/src/ConfigDialog.h
+++ b/MantidPlot/src/ConfigDialog.h
@@ -238,7 +238,7 @@ private:
       *lblGeneralDefaultColorMap, *lblBoxGeneralDefaultColorMap,
       *lblVsiLastSession, *lblVsiInitialView;
   ColorButton *vsiDefaultBackground;
-  QGroupBox *mdPlottingGeneralFrame, *mdPlottingVsiFrameBottom;
+  QGroupBox *mdPlottingGeneralFrame, *mdPlottingVsiFrameBottom, *vsiAxesColor;
   QCheckBox *vsiLastSession;
   MantidQt::API::MdSettings m_mdSettings;
 
diff --git a/MantidPlot/src/Mantid/MantidCurve.h b/MantidPlot/src/Mantid/MantidCurve.h
index 9572c104f2d37d9336242db4b62052851e3b8ac5..2a31a246de2f67ea08d164ed15855024bda36119 100644
--- a/MantidPlot/src/Mantid/MantidCurve.h
+++ b/MantidPlot/src/Mantid/MantidCurve.h
@@ -107,7 +107,8 @@ private:
   mutable QwtDoubleRect m_boundingRect;
 
   // To ensure that all MantidCurves can work with Mantid Workspaces.
-  virtual void init(Graph *g, bool distr, GraphOptions::CurveType style) = 0;
+  virtual void init(Graph *g, bool distr, GraphOptions::CurveType style,
+                    bool multipleSpectra = false) = 0;
 };
 
 #endif
diff --git a/MantidPlot/src/Mantid/MantidGroupPlotGenerator.h b/MantidPlot/src/Mantid/MantidGroupPlotGenerator.h
deleted file mode 100644
index 051ef36ce3c192bd20a09718043102cc9589554a..0000000000000000000000000000000000000000
--- a/MantidPlot/src/Mantid/MantidGroupPlotGenerator.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef MANTIDGROUPPLOTGENERATOR_H_
-#define MANTIDGROUPPLOTGENERATOR_H_
-
-#include "Graph3D.h"
-#include "MantidAPI/NumericAxis.h"
-#include "MantidAPI/WorkspaceGroup_fwd.h"
-#include "MantidMatrix.h"
-#include <MantidQtMantidWidgets/MantidSurfacePlotDialog.h>
-
-/**
-* This utility class generates a surface or contour plot from a group of
-* workspaces.
-*/
-class MantidGroupPlotGenerator {
-public:
-  /// Constructor
-  explicit MantidGroupPlotGenerator(
-      MantidQt::MantidWidgets::MantidDisplayBase *mantidUI);
-
-  /// Plots a surface from the given workspace group
-  void plotSurface(
-      const Mantid::API::WorkspaceGroup_const_sptr &wsGroup,
-      const MantidQt::MantidWidgets::MantidSurfacePlotDialog::UserInputSurface &
-          options) const;
-
-  /// Plots a contour plot from the given workspace group
-  void plotContour(
-      const Mantid::API::WorkspaceGroup_const_sptr &wsGroup,
-      const MantidQt::MantidWidgets::MantidSurfacePlotDialog::UserInputSurface &
-          options) const;
-
-  /// Tests if WorkspaceGroup contains only MatrixWorkspaces
-  static bool groupIsAllMatrixWorkspaces(
-      const Mantid::API::WorkspaceGroup_const_sptr &wsGroup);
-
-  /// Validates the given options and returns an error string
-  static std::string validatePlotOptions(
-      MantidQt::MantidWidgets::MantidSurfacePlotDialog::UserInputSurface &
-          options,
-      int nWorkspaces);
-
-  /// Tests if WorkspaceGroup contents all have same X for given spectrum
-  static bool
-  groupContentsHaveSameX(const Mantid::API::WorkspaceGroup_const_sptr &wsGroup,
-                         const size_t index);
-
-private:
-  /// Type of graph to plot
-  enum class Type { Surface, Contour };
-
-  /// Plots a graph from the given workspace group
-  void plot(
-      Type graphType, const Mantid::API::WorkspaceGroup_const_sptr &wsGroup,
-      const MantidQt::MantidWidgets::MantidSurfacePlotDialog::UserInputSurface &
-          options) const;
-
-  /// Creates a single workspace to plot from
-  const Mantid::API::MatrixWorkspace_sptr createWorkspaceForGroupPlot(
-      Type graphType,
-      boost::shared_ptr<const Mantid::API::WorkspaceGroup> wsGroup,
-      const MantidQt::MantidWidgets::MantidSurfacePlotDialog::UserInputSurface &
-          options) const;
-
-  /// Returns a single log value from the given workspace
-  double
-  getSingleLogValue(int wsIndex,
-                    const Mantid::API::MatrixWorkspace_const_sptr &matrixWS,
-                    const QString &logName) const;
-
-  /// Returns a single log value from supplied custom log
-  double getSingleLogValue(int wsIndex, const std::set<double> &values) const;
-
-  /// Get X axis title
-  QString getXAxisTitle(
-      const boost::shared_ptr<const Mantid::API::WorkspaceGroup> wsGroup) const;
-
-  /// Validate chosen workspaces/spectra
-  void validateWorkspaceChoices(
-      const boost::shared_ptr<const Mantid::API::WorkspaceGroup> wsGroup,
-      const size_t spectrum) const;
-
-  /// Pointer to the Mantid UI
-  MantidQt::MantidWidgets::MantidDisplayBase *const m_mantidUI;
-};
-
-#endif
diff --git a/MantidPlot/src/Mantid/MantidMDCurve.cpp b/MantidPlot/src/Mantid/MantidMDCurve.cpp
index 06aaa39dccc150e05685aa1da10cf14e3e4c0ba2..c4c5353a2ab7073a988be30b7757cb2d3c00ef24 100644
--- a/MantidPlot/src/Mantid/MantidMDCurve.cpp
+++ b/MantidPlot/src/Mantid/MantidMDCurve.cpp
@@ -50,11 +50,16 @@ MantidMDCurve::MantidMDCurve(const MantidMDCurve &c)
 
 /**
  *  @param g :: The Graph widget which will display the curve
- *  @param distr :: True if this is a distribution
+ *  @param distr :: True if this is a distribution,
+ *  not applicable here.
  *  @param style :: The graph style to use
+ *  @param multipleSpectra :: True if there are multiple spectra,
+ *  not applicable here.
  */
-void MantidMDCurve::init(Graph *g, bool distr, GraphOptions::CurveType style) {
+void MantidMDCurve::init(Graph *g, bool distr, GraphOptions::CurveType style,
+                         bool multipleSpectra) {
   UNUSED_ARG(distr);
+  UNUSED_ARG(multipleSpectra);
   IMDWorkspace_const_sptr ws = boost::dynamic_pointer_cast<IMDWorkspace>(
       AnalysisDataService::Instance().retrieve(m_wsName.toStdString()));
   if (!ws) {
diff --git a/MantidPlot/src/Mantid/MantidMDCurve.h b/MantidPlot/src/Mantid/MantidMDCurve.h
index 813692275e272ac37ffbd3b73bc7daa4d1a53010..adbe67f34fce6e7d278f3d11402e30d9d2856bf1 100644
--- a/MantidPlot/src/Mantid/MantidMDCurve.h
+++ b/MantidPlot/src/Mantid/MantidMDCurve.h
@@ -90,7 +90,8 @@ private:
   using PlotCurve::draw; // Avoid Intel compiler warning
 
   /// Init the curve
-  void init(Graph *g, bool distr, GraphOptions::CurveType style) override;
+  void init(Graph *g, bool distr, GraphOptions::CurveType style,
+            bool multipleSpectra = false) override;
 
   /// Handles delete notification
   void postDeleteHandle(const std::string &wsName) override {
diff --git a/MantidPlot/src/Mantid/MantidMatrix.cpp b/MantidPlot/src/Mantid/MantidMatrix.cpp
index 7da2478f098100c867dbc652bd2c8e57f78d13f6..2d05ad95e4ca9ea965464fcc0755d96227904d21 100644
--- a/MantidPlot/src/Mantid/MantidMatrix.cpp
+++ b/MantidPlot/src/Mantid/MantidMatrix.cpp
@@ -566,8 +566,12 @@ QString MantidMatrix::workspaceName() const {
 }
 
 QwtDoubleRect MantidMatrix::boundingRect() {
+  const int defaultNumberSpectroGramRows = 700;
+  const int defaultNumberSpectroGramColumns = 700;
   if (m_boundingRect.isNull()) {
-    m_spectrogramRows = numRows() > 100 ? numRows() : 100;
+    m_spectrogramRows = numRows() > defaultNumberSpectroGramRows
+                            ? numRows()
+                            : defaultNumberSpectroGramRows;
 
     // This is only meaningful if a 2D (or greater) workspace
     if (m_workspace->axes() > 1) {
@@ -634,10 +638,12 @@ QwtDoubleRect MantidMatrix::boundingRect() {
           }
         }
         m_spectrogramCols = static_cast<int>((x_end - x_start) / ddx);
-        if (m_spectrogramCols < 100)
-          m_spectrogramCols = 100;
+        if (m_spectrogramCols < defaultNumberSpectroGramColumns)
+          m_spectrogramCols = defaultNumberSpectroGramColumns;
       } else {
-        m_spectrogramCols = numCols() > 100 ? numCols() : 100;
+        m_spectrogramCols = numCols() > defaultNumberSpectroGramColumns
+                                ? numCols()
+                                : defaultNumberSpectroGramColumns;
       }
       m_boundingRect = QwtDoubleRect(qMin(x_start, x_end) - 0.5 * dx,
                                      qMin(y_start, y_end) - 0.5 * dy,
diff --git a/MantidPlot/src/Mantid/MantidMatrixCurve.cpp b/MantidPlot/src/Mantid/MantidMatrixCurve.cpp
index cf38b44252fdf94650efe1afe3b7a0c68b176227..2868de07d930febdee6289b9e95914e5d756ed25 100644
--- a/MantidPlot/src/Mantid/MantidMatrixCurve.cpp
+++ b/MantidPlot/src/Mantid/MantidMatrixCurve.cpp
@@ -28,6 +28,7 @@ Mantid::Kernel::Logger g_log("MantidMatrixCurve");
 
 /**
  *  @param name :: The curve's name - shown in the legend
+ *  If empty, a default name is used.
  *  @param wsName :: The workspace name.
  *  @param g :: The Graph widget which will display the curve
  *  @param index :: The index of the spectrum or bin in the workspace
@@ -36,22 +37,27 @@ Mantid::Kernel::Logger g_log("MantidMatrixCurve");
  *  @param err :: True if the errors are to be plotted
  *  @param distr :: True if it is a distribution
  *  @param style :: CurveType style to use
+ *  @param multipleSpectra :: indicates that there are multiple spectra and
+ *  so spectrum numbers must always be shown in the plot legend.
  *..@throw Mantid::Kernel::Exception::NotFoundError if the workspace cannot be
  *found
  *  @throw std::invalid_argument if the index is out of range for the given
  *workspace
  */
-MantidMatrixCurve::MantidMatrixCurve(const QString &, const QString &wsName,
+MantidMatrixCurve::MantidMatrixCurve(const QString &name, const QString &wsName,
                                      Graph *g, int index, IndexDir indexType,
                                      bool err, bool distr,
-                                     GraphOptions::CurveType style)
+                                     GraphOptions::CurveType style,
+                                     bool multipleSpectra)
     : MantidCurve(err), m_wsName(wsName), m_index(index),
       m_indexType(indexType) {
   if (!g) {
     throw std::invalid_argument("MantidMatrixCurve::MantidMatrixCurve - NULL "
                                 "graph pointer not allowed");
   }
-  init(g, distr, style);
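+  // Use the supplied name as the curve title; when it is empty, init() builds
+  // a default title from the workspace.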
+  if (!name.isEmpty())
+    this->setTitle(name);
+  init(g, distr, style, multipleSpectra);
 }
 
 /**
@@ -91,9 +97,12 @@ MantidMatrixCurve::MantidMatrixCurve(const MantidMatrixCurve &c)
  *  @param g :: The Graph widget which will display the curve
  *  @param distr :: True for a distribution
  *  @param style :: The curve type to use
+ *  @param multipleSpectra :: indicates that there are multiple spectra and
+ *  so spectrum numbers must always be shown in the plot legend.
  */
 void MantidMatrixCurve::init(Graph *g, bool distr,
-                             GraphOptions::CurveType style) {
+                             GraphOptions::CurveType style,
+                             bool multipleSpectra) {
   // Will throw if name not found but return NULL ptr if the type is incorrect
   MatrixWorkspace_const_sptr workspace =
       AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>(
@@ -110,14 +119,21 @@ void MantidMatrixCurve::init(Graph *g, bool distr,
   }
 
   // Set the curve name if it the non-naming constructor was called
+  // or the naming constructor was called with an empty name.
   if (this->title().isEmpty()) {
-    // If there's only one spectrum in the workspace, title is simply workspace
+    // If there's only one histogram in the workspace, the title is simply the
+    // workspace
     // name
     if (workspace->getNumberHistograms() == 1)
       this->setTitle(m_wsName);
     else
-      this->setTitle(createCurveName(workspace));
+      this->setTitle(createCurveName("", workspace));
+  } else if (multipleSpectra) {
+    this->setTitle(createCurveName(this->title().text(), workspace));
   }
+  // When there are multiple spectra the spectrum label is appended to the
+  // title above, via createCurveName(this->title().text(), workspace).
 
   Mantid::API::MatrixWorkspace_const_sptr matrixWS =
       boost::dynamic_pointer_cast<const Mantid::API::MatrixWorkspace>(
@@ -262,15 +278,24 @@ void MantidMatrixCurve::itemChanged() {
 
 /**
  * Create the name for a curve from the following input:
+ * @param prefix :: prefix for the name; if empty, the workspace name is used.
  * @param ws :: Pointer to workspace
  */
 QString MantidMatrixCurve::createCurveName(
+    const QString &prefix,
     const boost::shared_ptr<const Mantid::API::MatrixWorkspace> ws) {
-  QString name = m_wsName + "-";
+  QString name = "";
+
+  if (prefix.isEmpty())
+    name += m_wsName + "-";
+  else
+    name += prefix + "-";
+
   if (m_indexType == Spectrum)
     name += QString::fromStdString(ws->getAxis(1)->label(m_index));
   else
     name += "bin-" + QString::number(m_index);
+
   return name;
 }
 
diff --git a/MantidPlot/src/Mantid/MantidMatrixCurve.h b/MantidPlot/src/Mantid/MantidMatrixCurve.h
index 04be27f88d3a3db2ca3ca798df2f2891b5c17dfe..1aa7922e8c0fa0e562911b2d5605812a28b101a7 100644
--- a/MantidPlot/src/Mantid/MantidMatrixCurve.h
+++ b/MantidPlot/src/Mantid/MantidMatrixCurve.h
@@ -53,7 +53,8 @@ public:
   MantidMatrixCurve(const QString &name, const QString &wsName, Graph *g,
                     int index, IndexDir indexType, bool err = false,
                     bool distr = false,
-                    GraphOptions::CurveType style = GraphOptions::Unspecified);
+                    GraphOptions::CurveType style = GraphOptions::Unspecified,
+                    bool multipleSpectra = false);
 
   /// More complex constructor setting some defaults for the curve
   MantidMatrixCurve(const QString &wsName, Graph *g, int index,
@@ -129,7 +130,8 @@ private:
   using PlotCurve::draw; // Avoid Intel compiler warning
 
   /// Init the curve
-  void init(Graph *g, bool distr, GraphOptions::CurveType style) override;
+  void init(Graph *g, bool distr, GraphOptions::CurveType style,
+            bool multipleSpectra = false) override;
 
   /// Handles delete notification
   void postDeleteHandle(const std::string &wsName) override {
@@ -157,6 +159,7 @@ private slots:
 private:
   /// Make the curve name
   QString createCurveName(
+      const QString &prefix,
       const boost::shared_ptr<const Mantid::API::MatrixWorkspace> ws);
 
   QString
diff --git a/MantidPlot/src/Mantid/MantidMatrixFunction.cpp b/MantidPlot/src/Mantid/MantidMatrixFunction.cpp
index 78bb78f184bc19e8667a4825d62e368fa55e1fc3..678a3b634a84ac1c831ef350c8b356fa6c7074f4 100644
--- a/MantidPlot/src/Mantid/MantidMatrixFunction.cpp
+++ b/MantidPlot/src/Mantid/MantidMatrixFunction.cpp
@@ -70,10 +70,11 @@ double MantidMatrixFunction::operator()(double x, double y) {
 
   size_t j = indexX(i, x);
 
-  if (j < columns())
+  if (j < columns()) {
     return m_workspace->y(i)[j];
-  else
+  } else {
     return m_outside;
+  }
 }
 
 double MantidMatrixFunction::getMinPositiveValue() const {
@@ -149,32 +150,77 @@ MantidMatrixFunction::getHistogramX(int row) const {
   return m_workspace->x(row);
 }
 
-size_t MantidMatrixFunction::indexX(size_t row, double s) const {
-  size_t n = m_workspace->blocksize();
-
+/**
+ * Performs a binary search for an x value in the x data of a particular
+ * spectrum. There are two scenarios to consider which are illustrated by
+ * examples
+ *
+ * 1. Histogram Data:
+ * The x value of the example is 6500
+ *
+ * Y:       6      6       16        6         6
+ * X: 2000    4000    8000    12000     16000     20000
+ *
+ * The algorithm will determine that the index of X which is closest to 6500
+ * is 2, but the Y index with the correct data is 1 (since the value should be
+ * 6, not 16).
+ *
+ * 2. Point Data:
+ * Y:   6      6       16        6         6
+ * X: 2000    4000    8000    12000     16000
+ *
+ * The algorithm will determine that the index of X which is closest to 6500
+ * is 2, and the Y index with the correct data is 2 as well, since there is a
+ * one-to-one mapping between the indices of Y and X.
+ *
+ * @param row: the workspace index to search in
+ * @param xValue: the value to search for
+ * @return the index of the Y data which is associated with the x value.
+ */
+size_t MantidMatrixFunction::indexX(size_t row, double xValue) const {
+  auto isHistogram = m_workspace->isHistogramData();
   const auto &X = m_workspace->x(row);
-  if (n == 0 || s < X[0] || s > X[n - 1])
+  const auto n = X.size();
+
+  auto provideIndexForPointData =
+      [&X](size_t start, size_t stop, double xValue, double midValue) {
+        if (fabs(X[stop] - xValue) < fabs(midValue - xValue))
+          return stop;
+        return start;
+      };
+
+  if (n == 0 || xValue < X[0] || xValue > X[n - 1]) {
     return std::numeric_limits<size_t>::max();
+  }
 
-  size_t i = 0, j = n - 1, k = n / 2;
+  size_t start = 0, stop = n - 1, mid = n / 2;
   for (size_t it = 0; it < n; it++) {
-    const double ss = X[k];
-    if (ss == s)
-      return k;
-    if (abs(static_cast<int>(i) - static_cast<int>(j)) < 2) {
-      double ds = fabs(ss - s);
-      if (fabs(X[j] - s) < ds)
-        return j;
-      return i;
+    const double midValue = X[mid];
+    if (midValue == xValue)
+      return mid;
+
+    // If we reach two neighbouring x values, then we need to decide
+    // which index to pick.
+    if (abs(static_cast<int>(start) - static_cast<int>(stop)) < 2) {
+      if (isHistogram) {
+        return start;
+      } else {
+        return provideIndexForPointData(start, stop, xValue, midValue);
+      }
     }
-    if (s > ss)
-      i = k;
+
+    // Reset the interval to search
+    if (xValue > midValue)
+      start = mid;
     else
-      j = k;
-    k = i + (j - i) / 2;
+      stop = mid;
+    mid = start + (stop - start) / 2;
   }
 
-  return i;
+  return start;
 }
 
 size_t MantidMatrixFunction::indexY(double s) const {
diff --git a/MantidPlot/src/Mantid/MantidMatrixFunction.h b/MantidPlot/src/Mantid/MantidMatrixFunction.h
index d47da242fb763196d77a58b8a96698eadf30f5e0..65b5dcdf9dc9e7ba54313a5218f66588ebfe4b51 100644
--- a/MantidPlot/src/Mantid/MantidMatrixFunction.h
+++ b/MantidPlot/src/Mantid/MantidMatrixFunction.h
@@ -75,7 +75,7 @@ private:
 
   void init(const Mantid::API::MatrixWorkspace_const_sptr &workspace);
   void reset(const Mantid::API::MatrixWorkspace_const_sptr &workspace);
-  size_t indexX(size_t row, double s) const;
+  size_t indexX(size_t row, double xValue) const;
   size_t indexY(double s) const;
 
   /* Data */
diff --git a/MantidPlot/src/Mantid/MantidPlotUtilities.cpp b/MantidPlot/src/Mantid/MantidPlotUtilities.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..38abb9f165dadf952a9f1ff88cd233fb5445d030
--- /dev/null
+++ b/MantidPlot/src/Mantid/MantidPlotUtilities.cpp
@@ -0,0 +1,75 @@
+#include "MantidPlotUtilities.h"
+
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidAPI/Run.h"
+#include "MantidAPI/WorkspaceFactory.h"
+#include "MantidAPI/WorkspaceGroup.h"
+#include "MantidGeometry/MDGeometry/IMDDimension.h"
+#include <MantidQtMantidWidgets/MantidDisplayBase.h>
+
+using namespace MantidQt::MantidWidgets;
+using Mantid::API::WorkspaceGroup_const_sptr;
+using Mantid::API::WorkspaceGroup_sptr;
+using Mantid::API::MatrixWorkspace_const_sptr;
+using Mantid::API::MatrixWorkspace_sptr;
+using Mantid::API::MatrixWorkspace;
+using Mantid::API::ExperimentInfo;
+using Mantid::HistogramData::Histogram;
+
+/** Compare two CurveSpecs for sorting: first by log value and, if those are
+* equal, by workspace index.
+* @param lhs left-hand operand
+* @param rhs right-hand operand
+* @returns true if lhs should be ordered before rhs, i.e. it has the smaller
+* log value (or the smaller index when the log values are equal)
+*/
+bool byLogValue(const CurveSpec &lhs, const CurveSpec &rhs) {
+  if (lhs.logVal == rhs.logVal)
+    return (lhs.index < rhs.index);
+  return (lhs.logVal < rhs.logVal);
+}
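An illustrative usage sketch (not part of the patch): byLogValue is a strict weak ordering, so it can be passed directly to std::sort to order curves by log value with the workspace index as tie-breaker. The helper name below is hypothetical.

    #include <algorithm>
    #include <vector>

    // Order curves by log value; ties fall back to workspace index.
    void sortCurvesByLogValue(std::vector<CurveSpec> &curves) {
      std::sort(curves.begin(), curves.end(), byLogValue);
    }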
+
+/**
+ * Gets the given log value from the given workspace as a double.
+ * Should be a single-valued log!
+ * @param wsIndex :: [input] Index of workspace in group
+ * @param matrixWS :: [input] Workspace to find log from
+ * @param logName :: [input] Name of log
+ * @returns log value as a double, or workspace index
+ * @throws invalid_argument if log is wrong type or not present
+ */
+double getSingleWorkspaceLogValue(
+    size_t wsIndex, const Mantid::API::MatrixWorkspace_const_sptr &matrixWS,
+    const QString &logName) {
+  if (logName == MantidWSIndexWidget::WORKSPACE_INDEX || logName == "")
+    return static_cast<double>(wsIndex); // cast for plotting
+
+  // MatrixWorkspace is an ExperimentInfo
+  auto log = matrixWS->run().getLogData(logName.toStdString());
+  if (!log)
+    throw std::invalid_argument("Log not present in workspace");
+  if (dynamic_cast<Mantid::Kernel::PropertyWithValue<int> *>(log) ||
+      dynamic_cast<Mantid::Kernel::PropertyWithValue<double> *>(log))
+    return std::stod(log->value());
+
+  throw std::invalid_argument(
+      "Log is of wrong type (expected single numeric value");
+}
+
+/**
+* Gets the custom, user-provided log value of the given index.
+* i.e. the nth in order from smallest to largest.
+* If the index is outside the range, returns 0.
+* @param wsIndex :: [input] Index of log value to use
+* @param logValues :: [input] User-provided set of log values
+* @returns Numeric log value
+*/
+double getSingleWorkspaceLogValue(size_t wsIndex,
+                                  const std::set<double> &logValues) {
+  if (wsIndex >= logValues.size())
+    return 0;
+
+  auto it = logValues.begin();
+  std::advance(it, wsIndex);
+  return *it;
+}
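An illustrative sketch (not part of the patch) of the same idea: because a std::set keeps its elements sorted, the nth smallest custom log value is simply the element reached by advancing the begin() iterator n places (linear time for a set). The function name is hypothetical.

    #include <iterator>
    #include <set>

    double nthSmallestLogValue(const std::set<double> &logValues, size_t n) {
      if (n >= logValues.size())
        return 0.0; // same out-of-range fallback as above
      return *std::next(logValues.begin(), static_cast<std::ptrdiff_t>(n));
    }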
diff --git a/MantidPlot/src/Mantid/MantidPlotUtilities.h b/MantidPlot/src/Mantid/MantidPlotUtilities.h
new file mode 100644
index 0000000000000000000000000000000000000000..1093cdd1c88740853d102b0198f134e3716dca89
--- /dev/null
+++ b/MantidPlot/src/Mantid/MantidPlotUtilities.h
@@ -0,0 +1,28 @@
+#ifndef MANTIDPLOTUTILITIES_H_
+#define MANTIDPLOTUTILITIES_H_
+
+#include "MantidQtMantidWidgets/MantidWSIndexDialog.h"
+
+/**
+* These utilities assist with plotting in Mantid
+*/
+/// Structure to aid ordering of plots
+struct CurveSpec {
+  double logVal;
+  QString wsName;
+  int index;
+};
+
+/// Compare to sort according to log value
+bool byLogValue(const CurveSpec &lhs, const CurveSpec &rhs);
+
+/// Returns a single log value from the given workspace
+double getSingleWorkspaceLogValue(
+    size_t wsIndex, const Mantid::API::MatrixWorkspace_const_sptr &matrixWS,
+    const QString &logName);
+
+/// Returns a single custom log value
+double getSingleWorkspaceLogValue(size_t wsIndex,
+                                  const std::set<double> &logValues);
+
+#endif // MANTIDPLOTUTILITIES_H_
diff --git a/MantidPlot/src/Mantid/MantidGroupPlotGenerator.cpp b/MantidPlot/src/Mantid/MantidSurfaceContourPlotGenerator.cpp
similarity index 55%
rename from MantidPlot/src/Mantid/MantidGroupPlotGenerator.cpp
rename to MantidPlot/src/Mantid/MantidSurfaceContourPlotGenerator.cpp
index be6502706e3f8c997350a5fb5dd8952e6b1dbb24..12b5d7c086554187b1c9ea9143a7bf78a4e75f93 100644
--- a/MantidPlot/src/Mantid/MantidGroupPlotGenerator.cpp
+++ b/MantidPlot/src/Mantid/MantidSurfaceContourPlotGenerator.cpp
@@ -1,9 +1,10 @@
-#include "MantidGroupPlotGenerator.h"
+#include "MantidSurfaceContourPlotGenerator.h"
 
 #include "MantidAPI/MatrixWorkspace.h"
 #include "MantidAPI/Run.h"
 #include "MantidAPI/WorkspaceFactory.h"
 #include "MantidAPI/WorkspaceGroup.h"
+#include "MantidPlotUtilities.h"
 #include "MantidGeometry/MDGeometry/IMDDimension.h"
 #include <MantidQtMantidWidgets/MantidDisplayBase.h>
 
@@ -20,75 +21,103 @@ using Mantid::HistogramData::Histogram;
  * Constructor
  * @param mantidUI :: [input] Pointer to the Mantid UI
  */
-MantidGroupPlotGenerator::MantidGroupPlotGenerator(MantidDisplayBase *mantidUI)
+MantidSurfaceContourPlotGenerator::MantidSurfaceContourPlotGenerator(
+    MantidDisplayBase *mantidUI)
     : m_mantidUI(mantidUI) {}
 
 /**
  * Plots a surface graph from the given workspace group
- * @param wsGroup :: [input] Workspace group to plot
- * @param options :: [input] User-selected plot options
+ * @param accepted :: [input] true if plot has been accepted
+ * @param plotIndex :: [input] plot index
+ * @param axisName :: [input] axis name
+ * @param logName :: [input] log name
+ * @param customLogValues :: [input] custom log values
+ * @param workspaces :: [input] set of workspaces forming the group to be
+ * plotted
  */
-void MantidGroupPlotGenerator::plotSurface(
-    const WorkspaceGroup_const_sptr &wsGroup,
-    const MantidSurfacePlotDialog::UserInputSurface &options) const {
-  plot(Type::Surface, wsGroup, options);
+void MantidSurfaceContourPlotGenerator::plotSurface(
+    bool accepted, int plotIndex, const QString &axisName,
+    const QString &logName, const std::set<double> &customLogValues,
+    const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces)
+    const {
+  plot(Type::Surface, accepted, plotIndex, axisName, logName, customLogValues,
+       workspaces);
 }
 
 /**
  * Plots a contour plot from the given workspace group
- * @param wsGroup :: [input] Workspace group to plot
- * @param options :: [input] User-selected plot options
+ * @param accepted :: [input] true if plot has been accepted
+ * @param plotIndex :: [input] plot index
+ * @param axisName :: [input] axis name
+ * @param logName :: [input] log name
+ * @param customLogValues :: [input] custom log values
+ * @param workspaces :: [input] set of workspaces forming the group to be
+ * plotted
  */
-void MantidGroupPlotGenerator::plotContour(
-    const WorkspaceGroup_const_sptr &wsGroup,
-    const MantidSurfacePlotDialog::UserInputSurface &options) const {
-  plot(Type::Contour, wsGroup, options);
+void MantidSurfaceContourPlotGenerator::plotContour(
+    bool accepted, int plotIndex, const QString &axisName,
+    const QString &logName, const std::set<double> &customLogValues,
+    const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces)
+    const {
+  plot(Type::Contour, accepted, plotIndex, axisName, logName, customLogValues,
+       workspaces);
 }
 
 /**
- * Plots a graph from the given workspace group
+ * Plots a contour or surface graph from the given workspace group
  * @param graphType :: [input] Type of graph to plot
- * @param wsGroup :: [input] Workspace group to plot
- * @param options :: [input] User-selected plot options
+ * @param accepted :: [input] true if plot has been accepted
+ * @param plotIndex :: [input] plot index
+ * @param axisName :: [input] axis name
+ * @param logName :: [input] log name
+ * @param customLogValues :: [input] custom log values
+ * @param workspaces :: [input] set of workspaces forming the group to be
+ * plotted
  */
-void MantidGroupPlotGenerator::plot(
-    Type graphType, const WorkspaceGroup_const_sptr &wsGroup,
-    const MantidSurfacePlotDialog::UserInputSurface &options) const {
-  if (wsGroup && options.accepted) {
+void MantidSurfaceContourPlotGenerator::plot(
+    Type graphType, bool accepted, int plotIndex, const QString &axisName,
+    const QString &logName, const std::set<double> &customLogValues,
+    const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces)
+    const {
+  if (!workspaces.empty() && accepted) {
+
     // Set up one new matrix workspace to hold all the data for plotting
     MatrixWorkspace_sptr matrixWS;
     try {
-      matrixWS = createWorkspaceForGroupPlot(graphType, wsGroup, options);
+      matrixWS = createWorkspaceForGroupPlot(graphType, workspaces, plotIndex,
+                                             logName, customLogValues);
     } catch (const std::logic_error &err) {
       m_mantidUI->showCritical(err.what());
       return;
-    }
+    } // We can now assume every workspace is a Matrix Workspace
 
     // Generate X axis title
-    const auto &xLabelQ = getXAxisTitle(wsGroup);
+    const auto &xLabelQ = getXAxisTitle(workspaces);
 
     // Import the data for plotting
     auto matrixToPlot =
         m_mantidUI->importMatrixWorkspace(matrixWS, -1, -1, false);
 
     // Change the default plot title
-    QString title = QString("plot for %1, spectrum %2")
-                        .arg(wsGroup->getName().c_str(),
-                             QString::number(options.plotIndex));
+    QString title =
+        QString("plot for %1, spectrum %2")
+            .arg(workspaces[0]->getName().c_str(), QString::number(plotIndex));
+    // For the time being we use the name of the first workspace.
+    // Later we need a way of conveying a name for this set of workspaces.
 
     // Plot the correct type of graph
     if (graphType == Type::Surface) {
       auto plot = matrixToPlot->plotGraph3D(Qwt3D::PLOTSTYLE::FILLED);
       plot->setTitle(QString("Surface ").append(title));
       plot->setXAxisLabel(xLabelQ);
-      plot->setYAxisLabel(options.axisName);
+      plot->setYAxisLabel(axisName);
       plot->setResolution(1); // If auto-set too high, appears empty
     } else if (graphType == Type::Contour) {
       MultiLayer *plot =
           matrixToPlot->plotGraph2D(GraphOptions::ColorMapContour);
-      plot->activeGraph()->setXAxisTitle(xLabelQ);
-      plot->activeGraph()->setYAxisTitle(options.axisName);
       plot->activeGraph()->setTitle(QString("Contour ").append(title));
+      plot->activeGraph()->setXAxisTitle(xLabelQ);
+      plot->activeGraph()->setYAxisTitle(axisName);
     }
   }
 }
@@ -101,31 +130,32 @@ void MantidGroupPlotGenerator::plot(
  * Table or Peaks workspaces then it cannot be used.
  *
  * @param graphType :: [input] Type of graph to plot
- * @param wsGroup :: [input] Pointer to workspace group to use as input
- * @param options :: [input] User input from dialog
+ * @param workspaces :: [input] set of workspaces forming the group to be
+ *plotted
+ * @param plotIndex :: [input] plot index
+ * @param logName :: [input] log name
+ * @param customLogValues :: [input] custom log
  * @returns Pointer to the created workspace
  */
 const MatrixWorkspace_sptr
-MantidGroupPlotGenerator::createWorkspaceForGroupPlot(
-    Type graphType, WorkspaceGroup_const_sptr wsGroup,
-    const MantidSurfacePlotDialog::UserInputSurface &options) const {
-  const auto index = static_cast<size_t>(
-      options.plotIndex);                // which spectrum to plot from each WS
-  const auto &logName = options.logName; // Log to read for axis of XYZ plot
+MantidSurfaceContourPlotGenerator::createWorkspaceForGroupPlot(
+    Type graphType,
+    const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces,
+    int plotIndex, const QString &logName,
+    const std::set<double> &customLogValues) const {
+  const auto index =
+      static_cast<size_t>(plotIndex); // which spectrum to plot from each WS
 
-  validateWorkspaceChoices(wsGroup, index);
+  validateWorkspaceChoices(workspaces, index);
 
   // Create workspace to hold the data
   // Each "spectrum" will be the data from one workspace
-  const auto nWorkspaces = wsGroup->getNumberOfEntries();
-  if (nWorkspaces < 0) {
-    return MatrixWorkspace_sptr();
-  }
+  const auto nWorkspaces = workspaces.size();
 
   MatrixWorkspace_sptr matrixWS; // Workspace to return
   // Cast succeeds: have already checked group contains only MatrixWorkspaces
   const auto firstWS =
-      boost::dynamic_pointer_cast<const MatrixWorkspace>(wsGroup->getItem(0));
+      boost::dynamic_pointer_cast<const MatrixWorkspace>(workspaces[0]);
 
   // If we are making a surface plot, create a point data workspace.
   // If it's a contour plot, make a histo workspace.
@@ -139,9 +169,9 @@ MantidGroupPlotGenerator::createWorkspaceForGroupPlot(
 
   // For each workspace in group, add data and log values
   std::vector<double> logValues;
-  for (int i = 0; i < nWorkspaces; i++) {
+  for (size_t i = 0; i < nWorkspaces; i++) {
     const auto ws =
-        boost::dynamic_pointer_cast<const MatrixWorkspace>(wsGroup->getItem(i));
+        boost::dynamic_pointer_cast<const MatrixWorkspace>(workspaces[i]);
     if (ws) {
       // Make sure the X data is set as the correct mode
       if (xMode == Histogram::XMode::BinEdges) {
@@ -152,8 +182,8 @@ MantidGroupPlotGenerator::createWorkspaceForGroupPlot(
       // Y and E can be shared
       matrixWS->setSharedY(i, ws->sharedY(index));
       matrixWS->setSharedE(i, ws->sharedE(index));
-      if (logName == MantidSurfacePlotDialog::CUSTOM) {
-        logValues.push_back(getSingleLogValue(i, options.customLogValues));
+      if (logName == MantidWSIndexWidget::CUSTOM) {
+        logValues.push_back(getSingleLogValue(i, customLogValues));
       } else {
         logValues.push_back(getSingleLogValue(i, ws, logName));
       }
@@ -166,33 +196,6 @@ MantidGroupPlotGenerator::createWorkspaceForGroupPlot(
   return matrixWS;
 }
 
-/**
- * Check if the supplied group contains only MatrixWorkspaces
- * @param wsGroup :: [input] Pointer to a WorkspaceGroup
- * @returns True if contains only MatrixWorkspaces, false if contains
- * other types or is empty
- */
-bool MantidGroupPlotGenerator::groupIsAllMatrixWorkspaces(
-    const WorkspaceGroup_const_sptr &wsGroup) {
-  bool allMatrixWSes = true;
-  if (wsGroup) {
-    if (wsGroup->isEmpty()) {
-      allMatrixWSes = false;
-    } else {
-      for (int index = 0; index < wsGroup->getNumberOfEntries(); index++) {
-        if (nullptr == boost::dynamic_pointer_cast<MatrixWorkspace>(
-                           wsGroup->getItem(index))) {
-          allMatrixWSes = false;
-          break;
-        }
-      }
-    }
-  } else {
-    allMatrixWSes = false;
-  }
-  return allMatrixWSes;
-}
-
 /**
  * Gets the custom, user-provided log value of the given index.
  * i.e. the nth in order from smallest to largest.
@@ -203,15 +206,9 @@ bool MantidGroupPlotGenerator::groupIsAllMatrixWorkspaces(
  * @param logValues :: [input] User-provided set of log values
  * @returns Numeric log value
  */
-double MantidGroupPlotGenerator::getSingleLogValue(
-    int wsIndex, const std::set<double> &logValues) const {
-  double value = 0;
-  if (wsIndex < static_cast<int>(logValues.size())) {
-    auto it = logValues.begin();
-    std::advance(it, wsIndex);
-    value = *it;
-  }
-  return value;
+double MantidSurfaceContourPlotGenerator::getSingleLogValue(
+    size_t wsIndex, const std::set<double> &logValues) const {
+  return getSingleWorkspaceLogValue(wsIndex, logValues);
 }
 
 /**
@@ -223,30 +220,11 @@ double MantidGroupPlotGenerator::getSingleLogValue(
  * @returns log value as a double, or workspace index
  * @throws invalid_argument if log is wrong type or not present
  */
-double MantidGroupPlotGenerator::getSingleLogValue(
-    int wsIndex, const Mantid::API::MatrixWorkspace_const_sptr &matrixWS,
+double MantidSurfaceContourPlotGenerator::getSingleLogValue(
+    size_t wsIndex, const Mantid::API::MatrixWorkspace_const_sptr &matrixWS,
     const QString &logName) const {
-  if (logName == MantidSurfacePlotDialog::WORKSPACE_INDEX) {
-    return wsIndex;
-  } else {
-    // MatrixWorkspace is an ExperimentInfo
-    if (auto ei = boost::dynamic_pointer_cast<const ExperimentInfo>(matrixWS)) {
-      auto log = ei->run().getLogData(logName.toStdString());
-      if (log) {
-        if (dynamic_cast<Mantid::Kernel::PropertyWithValue<int> *>(log) ||
-            dynamic_cast<Mantid::Kernel::PropertyWithValue<double> *>(log)) {
-          return std::stod(log->value());
-        } else {
-          throw std::invalid_argument(
-              "Log is of wrong type (expected single numeric value");
-        }
-      } else {
-        throw std::invalid_argument("Log not present in workspace");
-      }
-    } else {
-      throw std::invalid_argument("Bad input workspace type");
-    }
-  }
+
+  return getSingleWorkspaceLogValue(wsIndex, matrixWS, logName);
 }
 
 /**
@@ -259,11 +237,12 @@ double MantidGroupPlotGenerator::getSingleLogValue(
  * @param nWorkspaces :: [input] Number of workspaces in selected group
  * @returns Error string, or empty string if no error
  */
-std::string MantidGroupPlotGenerator::validatePlotOptions(
-    MantidSurfacePlotDialog::UserInputSurface &options, int nWorkspaces) {
+std::string MantidSurfaceContourPlotGenerator::validatePlotOptions(
+    MantidQt::MantidWidgets::MantidWSIndexWidget::UserInputAdvanced &options,
+    int nWorkspaces) {
   std::stringstream err;
   if (options.accepted) {
-    if (options.logName == MantidSurfacePlotDialog::CUSTOM) {
+    if (options.logName == MantidWSIndexWidget::CUSTOM) {
       // Check number of values supplied
       if (static_cast<int>(options.customLogValues.size()) != nWorkspaces) {
         err << "Number of custom log values must be equal to "
@@ -274,21 +253,24 @@ std::string MantidGroupPlotGenerator::validatePlotOptions(
   }
   return err.str();
 }
+// This function is not currently called.
+// May be needed later on in the GUI harmonization.
 
 /**
  * Generates X axis title for graph based on first workspace in group
- * @param wsGroup :: [input] WorkspaceGroup that contains data for graph - title
- * will be generated from the X label of the first workspace in the group
+ * @param workspaces :: [input] list of workspaces containing data for graph.
+ * The title is generated from the first of these workspaces.
  * @returns :: Title for X axis of graph
  */
-QString MantidGroupPlotGenerator::getXAxisTitle(
-    const boost::shared_ptr<const Mantid::API::WorkspaceGroup> wsGroup) const {
-  if (wsGroup->getNumberOfEntries() <= 0) {
+QString MantidSurfaceContourPlotGenerator::getXAxisTitle(
+    const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces)
+    const {
+  if (workspaces.empty()) {
     return QString();
   }
   const auto firstWS =
-      boost::dynamic_pointer_cast<const MatrixWorkspace>(wsGroup->getItem(
-          0)); // Already checked group contains only MatrixWorkspaces
+      boost::dynamic_pointer_cast<const MatrixWorkspace>(workspaces[0]);
+  // Already checked group contains only MatrixWorkspaces
   const auto &xAxisLabel = firstWS->getXDimension()->getName();
   const auto &xAxisUnits = firstWS->getXDimension()->getUnits().ascii();
   // Generate title for the X axis
@@ -305,23 +287,24 @@ QString MantidGroupPlotGenerator::getXAxisTitle(
  * (At the moment just tests size of X data)
  * Precondition: wsGroup contains only MatrixWorkspaces
  *
- * @param wsGroup :: [input] Group to test
+ * @param workspaces :: [input] Workspaces to test
  * @param index :: [input] Index of spectrum to test
  * @return :: True if X data same, else false.
  * @throw std::logic_error if spectrum index not contained in workspace, or if
  * wsGroup contains workspaces other than MatrixWorkspaces
  */
-bool MantidGroupPlotGenerator::groupContentsHaveSameX(
-    const Mantid::API::WorkspaceGroup_const_sptr &wsGroup, const size_t index) {
-  if (!wsGroup) {
+bool MantidSurfaceContourPlotGenerator::groupContentsHaveSameX(
+    const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces,
+    const size_t index) {
+  if (workspaces.empty()) {
     return false;
   }
 
   // Check and retrieve X data for given workspace, spectrum
-  const auto getXData = [&wsGroup](const size_t workspace,
-                                   const size_t spectrum) {
-    const auto &ws = boost::dynamic_pointer_cast<MatrixWorkspace>(
-        wsGroup->getItem(workspace));
+  const auto getXData = [&workspaces](const size_t index,
+                                      const size_t spectrum) {
+    const auto &ws =
+        boost::dynamic_pointer_cast<const MatrixWorkspace>(workspaces[index]);
     if (ws) {
       if (ws->getNumberHistograms() < spectrum) {
         throw std::logic_error("Spectrum index too large for some workspaces");
@@ -334,7 +317,7 @@ bool MantidGroupPlotGenerator::groupContentsHaveSameX(
     }
   };
 
-  const auto nWorkspaces = wsGroup->size();
+  const auto nWorkspaces = workspaces.size();
   switch (nWorkspaces) {
   case 0:
     return false;
@@ -355,27 +338,22 @@ bool MantidGroupPlotGenerator::groupContentsHaveSameX(
 }
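A simplified illustrative sketch (not part of the patch): the equivalent "all spectra share the same X" test over plain vectors, comparing each X array's length against the first. The function name is hypothetical.

    #include <algorithm>
    #include <vector>

    bool allSameXSize(const std::vector<std::vector<double>> &xData) {
      if (xData.empty())
        return false;
      const auto refSize = xData.front().size();
      return std::all_of(xData.begin(), xData.end(),
                         [refSize](const std::vector<double> &x) {
                           return x.size() == refSize;
                         });
    }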
 
 /**
- * Validate the supplied workspace group and spectrum index.
+ * Validate the supplied workspaces and spectrum index.
  * - Group must not be empty
  * - Group must only contain MatrixWorkspaces
  * - Group must have same X data for all workspaces
- * @param wsGroup :: [input] Workspace group to test
+ * @param workspaces :: [input] Workspaces to test
  * @param spectrum :: [input] Spectrum index to test
  * @throws std::invalid_argument if validation fails.
  */
-void MantidGroupPlotGenerator::validateWorkspaceChoices(
-    const boost::shared_ptr<const Mantid::API::WorkspaceGroup> wsGroup,
+void MantidSurfaceContourPlotGenerator::validateWorkspaceChoices(
+    const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces,
     const size_t spectrum) const {
-  if (!wsGroup || wsGroup->size() == 0) {
+  if (workspaces.empty()) {
     throw std::invalid_argument("Must provide a non-empty WorkspaceGroup");
   }
 
-  if (!groupIsAllMatrixWorkspaces(wsGroup)) {
-    throw std::invalid_argument(
-        "Input WorkspaceGroup must only contain MatrixWorkspaces");
-  }
-
-  if (!groupContentsHaveSameX(wsGroup, spectrum)) {
+  if (!groupContentsHaveSameX(workspaces, spectrum)) {
     throw std::invalid_argument(
         "Input WorkspaceGroup must have same X data for all workspaces");
   }
diff --git a/MantidPlot/src/Mantid/MantidSurfaceContourPlotGenerator.h b/MantidPlot/src/Mantid/MantidSurfaceContourPlotGenerator.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f16ba50e2a4abc6ceea990838e32f32de6f5b0d
--- /dev/null
+++ b/MantidPlot/src/Mantid/MantidSurfaceContourPlotGenerator.h
@@ -0,0 +1,87 @@
+#ifndef MANTIDSURFACECONTOURPLOTGENERATOR_H_
+#define MANTIDSURFACECONTOURPLOTGENERATOR_H_
+
+#include "Graph3D.h"
+#include "MantidAPI/NumericAxis.h"
+#include "MantidAPI/WorkspaceGroup_fwd.h"
+#include "MantidMatrix.h"
+#include <MantidQtMantidWidgets/MantidWSIndexDialog.h>
+#include <MantidQtMantidWidgets/MantidDisplayBase.h>
+
+/**
+* This utility class generates a surface or contour plot from a group of
+* workspaces.
+*/
+class MantidSurfaceContourPlotGenerator {
+public:
+  /// Constructor
+  explicit MantidSurfaceContourPlotGenerator(
+      MantidQt::MantidWidgets::MantidDisplayBase *mantidUI);
+
+  /// Plots a surface from the given workspace group
+  void plotSurface(bool accepted, int plotIndex, const QString &axisName,
+                   const QString &logName,
+                   const std::set<double> &customLogValues,
+                   const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &
+                       workspaces) const;
+
+  /// Plots a contour plot from the given workspace group
+  void plotContour(bool accepted, int plotIndex, const QString &axisName,
+                   const QString &logName,
+                   const std::set<double> &customLogValues,
+                   const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &
+                       workspaces) const;
+
+  /// Validates the given options and returns an error string
+  static std::string validatePlotOptions(
+      MantidQt::MantidWidgets::MantidWSIndexWidget::UserInputAdvanced &options,
+      int nWorkspaces);
+
+  /// Tests if WorkspaceGroup contents all have same X for given spectrum
+  static bool groupContentsHaveSameX(
+      const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces,
+      const size_t index);
+
+private:
+  /// Type of graph to plot
+  enum class Type { Surface, Contour };
+
+  /// Plots a graph from the given workspace group
+  void
+  plot(Type graphType, bool accepted, int plotIndex, const QString &axisName,
+       const QString &logName, const std::set<double> &customLogValues,
+       const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces)
+      const;
+
+  /// Creates a single workspace to plot from
+  const Mantid::API::MatrixWorkspace_sptr createWorkspaceForGroupPlot(
+      Type graphType,
+      const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces,
+      int plotIndex, const QString &logName,
+      const std::set<double> &customLogValues) const;
+
+  /// Returns a single log value from the given workspace
+  double
+  getSingleLogValue(size_t wsIndex,
+                    const Mantid::API::MatrixWorkspace_const_sptr &matrixWS,
+                    const QString &logName) const;
+
+  /// Returns a single log value from supplied custom log
+  double getSingleLogValue(size_t wsIndex,
+                           const std::set<double> &values) const;
+
+  /// Get X axis title
+  QString getXAxisTitle(
+      const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces)
+      const;
+
+  /// Validate chosen workspaces/spectra
+  void validateWorkspaceChoices(
+      const std::vector<Mantid::API::MatrixWorkspace_const_sptr> &workspaces,
+      const size_t spectrum) const;
+
+  /// Pointer to the Mantid UI
+  MantidQt::MantidWidgets::MantidDisplayBase *const m_mantidUI;
+};
+
+#endif
diff --git a/MantidPlot/src/Mantid/MantidUI.cpp b/MantidPlot/src/Mantid/MantidUI.cpp
index 8a860f032e5248e1aa69e7accab249907d92e1f4..20acd88e7a6d380b913c0a5aef7dd74297229ec2 100644
--- a/MantidPlot/src/Mantid/MantidUI.cpp
+++ b/MantidPlot/src/Mantid/MantidUI.cpp
@@ -5,13 +5,12 @@
 #include "AlgorithmHistoryWindow.h"
 #include "AlgorithmMonitor.h"
 #include "ImportWorkspaceDlg.h"
-#include "MantidGroupPlotGenerator.h"
+#include "MantidSurfaceContourPlotGenerator.h"
 #include "MantidMDCurve.h"
 #include "MantidMDCurveDialog.h"
 #include "MantidMatrix.h"
 #include "MantidMatrixCurve.h"
 #include "MantidQtMantidWidgets/FitPropertyBrowser.h"
-#include "MantidQtMantidWidgets/MantidSurfacePlotDialog.h"
 #include "MantidQtMantidWidgets/MantidWSIndexDialog.h"
 #include "MantidSampleLogDialog.h"
 #include "MantidSampleMaterialDialog.h"
@@ -99,7 +98,6 @@ using namespace Mantid::API;
 using namespace MantidQt::API;
 using namespace MantidQt::MantidWidgets;
 using MantidQt::MantidWidgets::MantidWSIndexDialog;
-using MantidQt::MantidWidgets::MantidSurfacePlotDialog;
 using MantidQt::MantidWidgets::MantidTreeWidget;
 using Mantid::Kernel::DateAndTime;
 using MantidQt::SliceViewer::SliceViewerWindow;
@@ -190,6 +188,19 @@ GraphOptions::CurveType getCurveTypeForFitResult(const size_t spectrum) {
     return GraphOptions::CurveType::Unspecified;
   }
 }
+
+std::vector<Mantid::API::MatrixWorkspace_const_sptr>
+getWorkspacesFromAds(const QList<QString> &workspaceNames) {
+  std::vector<Mantid::API::MatrixWorkspace_const_sptr> workspaces;
+  for (auto &workspaceName : workspaceNames) {
+    Mantid::API::MatrixWorkspace_const_sptr workspace =
+        boost::dynamic_pointer_cast<const Mantid::API::MatrixWorkspace>(
+            Mantid::API::AnalysisDataService::Instance().retrieve(
+                workspaceName.toStdString()));
+    workspaces.push_back(workspace);
+  }
+  return workspaces;
+}
 }
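An illustrative, more defensive variant (not part of the patch): retrieve() throws NotFoundError for unknown names and the dynamic cast yields a null pointer for anything that is not a MatrixWorkspace, so a caller may prefer to skip such entries rather than store null pointers. This assumes the same Mantid ADS calls used above; the function name is hypothetical.

    std::vector<Mantid::API::MatrixWorkspace_const_sptr>
    getMatrixWorkspacesFromAds(const QList<QString> &workspaceNames) {
      std::vector<Mantid::API::MatrixWorkspace_const_sptr> workspaces;
      for (const auto &workspaceName : workspaceNames) {
        try {
          auto workspace =
              boost::dynamic_pointer_cast<const Mantid::API::MatrixWorkspace>(
                  Mantid::API::AnalysisDataService::Instance().retrieve(
                      workspaceName.toStdString()));
          if (workspace) // skip anything that is not a MatrixWorkspace
            workspaces.push_back(workspace);
        } catch (const Mantid::Kernel::Exception::NotFoundError &) {
          // name not present in the ADS: skip it
        }
      }
      return workspaces;
    }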
 
 MantidUI::MantidUI(ApplicationWindow *aw)
@@ -2959,6 +2970,8 @@ MultiLayer *MantidUI::plot1D(const QStringList &ws_names,
 @param clearWindow :: Whether to clear specified plotWindow before plotting.
 Ignored if plotWindow == NULL
 @param waterfallPlot :: If true create a waterfall type plot
+@param log :: log name for advanced plotting
+@param customLogValues :: custom log values for advanced plotting
 @return NULL if failure. Otherwise, if plotWindow == NULL - created window, if
 not NULL - plotWindow
 */
@@ -2966,12 +2979,15 @@ MultiLayer *MantidUI::plot1D(const QMultiMap<QString, set<int>> &toPlot,
                              bool spectrumPlot,
                              MantidQt::DistributionFlag distr, bool errs,
                              MultiLayer *plotWindow, bool clearWindow,
-                             bool waterfallPlot) {
+                             bool waterfallPlot, const QString &log,
+                             const std::set<double> &customLogValues) {
   // Convert the list into a map (with the same workspace as key in each case)
+  bool multipleSpectra = false;
   QMultiMap<QString, int> pairs;
   // Need to iterate through the workspaces
   QMultiMap<QString, std::set<int>>::const_iterator it;
   for (it = toPlot.constBegin(); it != toPlot.constEnd(); ++it) {
+    multipleSpectra = multipleSpectra || (it.value().size() > 1);
     std::set<int>::const_reverse_iterator itSet;
     for (itSet = it->rbegin(); itSet != it->rend(); ++itSet) {
       pairs.insert(it.key(), *itSet);
@@ -2980,7 +2996,8 @@ MultiLayer *MantidUI::plot1D(const QMultiMap<QString, set<int>> &toPlot,
 
   // Pass over to the overloaded method
   return plot1D(pairs, spectrumPlot, distr, errs, GraphOptions::Unspecified,
-                plotWindow, clearWindow, waterfallPlot);
+                plotWindow, clearWindow, waterfallPlot, log, customLogValues,
+                multipleSpectra);
 }
 
 /** Create a 1d graph from the specified spectra in a MatrixWorkspace
@@ -3024,6 +3041,10 @@ MultiLayer *MantidUI::plot1D(const QString &wsName,
 @param clearWindow :: Whether to clear specified plotWindow before plotting.
 Ignored if plotWindow == NULL
 @param waterfallPlot :: If true create a waterfall type plot
+@param log :: log name for advanced plotting
+@param customLogValues :: custom log values for advanced plotting
+@param multipleSpectra :: indicates that there are multiple spectra and
+so spectrum numbers must always be shown in the plot legend.
 @return NULL if failure. Otherwise, if plotWindow == NULL - created window, if
 not NULL - plotWindow
 */
@@ -3032,7 +3053,9 @@ MultiLayer *MantidUI::plot1D(const QMultiMap<QString, int> &toPlot,
                              MantidQt::DistributionFlag distr, bool errs,
                              GraphOptions::CurveType style,
                              MultiLayer *plotWindow, bool clearWindow,
-                             bool waterfallPlot) {
+                             bool waterfallPlot, const QString &log,
+                             const std::set<double> &customLogValues,
+                             bool multipleSpectra) {
   if (toPlot.size() == 0)
     return NULL;
 
@@ -3085,26 +3108,28 @@ MultiLayer *MantidUI::plot1D(const QMultiMap<QString, int> &toPlot,
     plotAsDistribution = (distr == MantidQt::DistributionTrue);
   }
 
-  // Try to add curves to the plot
+  vector<CurveSpec> curveSpecList;
+  putLogsIntoCurveSpecs(curveSpecList, toPlot, log, customLogValues);
+
+  // Add curves to the plot
   Graph *g = ml->activeGraph();
   MantidMatrixCurve::IndexDir indexType =
       (spectrumPlot) ? MantidMatrixCurve::Spectrum : MantidMatrixCurve::Bin;
   MantidMatrixCurve *firstCurve(NULL);
-  for (QMultiMap<QString, int>::const_iterator it = toPlot.begin();
-       it != toPlot.end(); ++it) {
-    try {
-      auto *wsCurve = new MantidMatrixCurve(it.key(), g, it.value(), indexType,
-                                            errs, plotAsDistribution, style);
-      if (!firstCurve) {
-        firstCurve = wsCurve;
-        g->setNormalizable(firstCurve->isNormalizable());
-        g->setDistribution(firstCurve->isDistribution());
-      }
-    } catch (Mantid::Kernel::Exception::NotFoundError &) {
-      g_log.warning() << "Workspace " << it.key().toStdString()
-                      << " not found\n";
-    } catch (std::exception &ex) {
-      g_log.warning() << ex.what() << '\n';
+  QString logValue("");
+  for (size_t i = 0; i < curveSpecList.size(); i++) {
+
+    if (!log.isEmpty()) { // Get log value from workspace
+      logValue = QString::number(curveSpecList[i].logVal, 'g', 6);
+    }
+
+    auto *wsCurve = new MantidMatrixCurve(
+        logValue, curveSpecList[i].wsName, g, curveSpecList[i].index, indexType,
+        errs, plotAsDistribution, style, multipleSpectra);
+    if (!firstCurve) {
+      firstCurve = wsCurve;
+      g->setNormalizable(firstCurve->isNormalizable());
+      g->setDistribution(firstCurve->isDistribution());
     }
   }
 
@@ -3132,6 +3157,54 @@ MultiLayer *MantidUI::plot1D(const QMultiMap<QString, int> &toPlot,
   return ml;
 }
 
+/* Get the log values and put them into a curve spec list in preparation for
+*  creating the curves.
+*  @param curveSpecList :: list of curve specs to receive the logs
+*  @param toPlot :: workspaces to plot
+*  @param log :: log name
+*  @param customLogValues :: custom log values
+*/
+void MantidUI::putLogsIntoCurveSpecs(std::vector<CurveSpec> &curveSpecList,
+                                     const QMultiMap<QString, int> &toPlot,
+                                     const QString &log,
+                                     const std::set<double> &customLogValues) {
+  // Try to store log values, if needed, and prepare for sorting.
+  int i = 0;
+  for (QMultiMap<QString, int>::const_iterator it = toPlot.begin();
+       it != toPlot.end(); ++it) {
+    CurveSpec curveSpec;
+
+    try {
+      if (!log.isEmpty()) { // Get log value from workspace
+        if (!customLogValues.empty()) {
+          curveSpec.logVal = getSingleWorkspaceLogValue(i++, customLogValues);
+        } else {
+          MatrixWorkspace_const_sptr workspace =
+              AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>(
+                  it.key().toStdString());
+          curveSpec.logVal = getSingleWorkspaceLogValue(i++, workspace, log);
+        }
+      } else {
+        curveSpec.logVal = 0.1234; // Placeholder; not used when no log is selected.
+      }
+      curveSpec.wsName = it.key();
+      curveSpec.index = it.value();
+      curveSpecList.push_back(curveSpec);
+
+    } catch (Mantid::Kernel::Exception::NotFoundError &) {
+      g_log.warning() << "Workspace " << it.key().toStdString()
+                      << " not found\n";
+    } catch (std::exception &ex) {
+      g_log.warning() << ex.what() << '\n';
+    }
+  }
+
+  // Sort curves, if log values are used
+  if (!log.isEmpty()) {
+    sort(curveSpecList.begin(), curveSpecList.end(), byLogValue);
+  }
+}
+
 /**
 * Draw a color fill plot for each of the listed workspaces. Unfortunately the
 * plotting is
@@ -3935,18 +4008,6 @@ void MantidUI::updateRecentFilesList(const QString &fname) {
   m_appWindow->updateRecentFilesList(fname);
 }
 
-MantidSurfacePlotDialog *
-MantidUI::createSurfacePlotDialog(int flags, QStringList wsNames,
-                                  const QString &plotType) {
-  QList<QString> names;
-
-  for (auto &name : wsNames)
-    names.append(name);
-
-  return new MantidSurfacePlotDialog(this, static_cast<Qt::WFlags>(flags),
-                                     names, plotType);
-}
-
 /**
  * Create a new MantidWSIndexDialog
  * @param flags :: [input] Qt::WindowFlags enum as an integer
@@ -3954,53 +4015,38 @@ MantidUI::createSurfacePlotDialog(int flags, QStringList wsNames,
  * @param showWaterfall :: [input] Whether to show "plot as waterfall" option
  * @param showPlotAll :: [input] Whether to show "plot all" button
  * @param showTiledOpt :: [input] Whether to show "tiled plot" option
+ * @param isAdvanced :: [input] Whether to do advanced plotting
  * @returns :: New dialog
  */
-MantidWSIndexDialog *MantidUI::createWorkspaceIndexDialog(int flags,
-                                                          QStringList wsNames,
-                                                          bool showWaterfall,
-                                                          bool showPlotAll,
-                                                          bool showTiledOpt) {
+MantidWSIndexDialog *
+MantidUI::createWorkspaceIndexDialog(int flags, const QStringList &wsNames,
+                                     bool showWaterfall, bool showPlotAll,
+                                     bool showTiledOpt, bool isAdvanced) {
   return new MantidWSIndexDialog(m_appWindow, static_cast<Qt::WFlags>(flags),
                                  wsNames, showWaterfall, showPlotAll,
-                                 showTiledOpt);
-}
-
-void MantidUI::showSurfacePlot() {
-  // find the workspace group clicked on
-  auto wksp = m_exploreMantid->getSelectedWorkspace();
-
-  if (wksp) {
-    const auto wsGroup =
-        boost::dynamic_pointer_cast<const WorkspaceGroup>(wksp);
-    if (wsGroup) {
-      auto options = m_exploreMantid->chooseSurfacePlotOptions(
-          wsGroup->getNumberOfEntries());
-
-      // TODO: Figure out how to get rid of MantidUI dependency here.
-      auto plotter =
-          Mantid::Kernel::make_unique<MantidGroupPlotGenerator>(this);
-      plotter->plotSurface(wsGroup, options);
-    }
-  }
-}
-
-void MantidUI::showContourPlot() {
-  auto wksp = m_exploreMantid->getSelectedWorkspace();
-
-  if (wksp) {
-    const auto wsGroup =
-        boost::dynamic_pointer_cast<const WorkspaceGroup>(wksp);
-    if (wsGroup) {
-      auto options = m_exploreMantid->chooseContourPlotOptions(
-          wsGroup->getNumberOfEntries());
-
-      // TODO: Figure out how to remove the MantidUI dependency
-      auto plotter =
-          Mantid::Kernel::make_unique<MantidGroupPlotGenerator>(this);
-      plotter->plotContour(wsGroup, options);
-    }
-  }
+                                 showTiledOpt, isAdvanced);
+}
+
+void MantidUI::plotContour(bool accepted, int plotIndex,
+                           const QString &axisName, const QString &logName,
+                           const std::set<double> &customLogValues,
+                           const QList<QString> &workspaceNames) {
+  auto workspaces = getWorkspacesFromAds(workspaceNames);
+  auto plotter =
+      Mantid::Kernel::make_unique<MantidSurfaceContourPlotGenerator>(this);
+  plotter->plotContour(accepted, plotIndex, axisName, logName, customLogValues,
+                       workspaces);
+}
+
+void MantidUI::plotSurface(bool accepted, int plotIndex,
+                           const QString &axisName, const QString &logName,
+                           const std::set<double> &customLogValues,
+                           const QList<QString> &workspaceNames) {
+  auto workspaces = getWorkspacesFromAds(workspaceNames);
+  auto plotter =
+      Mantid::Kernel::make_unique<MantidSurfaceContourPlotGenerator>(this);
+  plotter->plotSurface(accepted, plotIndex, axisName, logName, customLogValues,
+                       workspaces);
 }
 
 QWidget *MantidUI::getParent() { return m_appWindow; }
diff --git a/MantidPlot/src/Mantid/MantidUI.h b/MantidPlot/src/Mantid/MantidUI.h
index c933f8585a024de336bd0041f59453c714c78e75..6be38e865948005ae0a58b30dbfe7666d83f3a70 100644
--- a/MantidPlot/src/Mantid/MantidUI.h
+++ b/MantidPlot/src/Mantid/MantidUI.h
@@ -8,6 +8,7 @@
 #include "../Graph.h"
 #include "MantidQtAPI/IProjectSerialisable.h"
 #include "MantidQtMantidWidgets/MantidDisplayBase.h"
+#include "MantidQtMantidWidgets/MantidWSIndexDialog.h"
 
 #include "MantidAPI/Algorithm.h"
 #include "MantidAPI/AlgorithmFactory.h"
@@ -23,6 +24,8 @@
 #include "MantidQtAPI/MantidAlgorithmMetatype.h"
 #include "MantidQtAPI/QwtWorkspaceSpectrumData.h"
 
+#include "MantidPlotUtilities.h"
+
 #include <Poco/NObserver.h>
 
 #include <QApplication>
@@ -209,12 +212,12 @@ public:
   // Table.
   Table *createTableFromSelectedRows(MantidMatrix *m, bool errs = true,
                                      bool binCentres = false);
-  MantidQt::MantidWidgets::MantidSurfacePlotDialog *
-  createSurfacePlotDialog(int flags, QStringList wsNames,
-                          const QString &plotType) override;
+
+  // Create dialog box for what to plot and how
   MantidQt::MantidWidgets::MantidWSIndexDialog *
-  createWorkspaceIndexDialog(int flags, QStringList wsNames, bool showWaterfall,
-                             bool showPlotAll, bool showTiledOpt) override;
+  createWorkspaceIndexDialog(int flags, const QStringList &wsNames,
+                             bool showWaterfall, bool showPlotAll,
+                             bool showTiledOpt, bool isAdvanced) override;
 
   /// Create a 1d graph form a Table
   MultiLayer *createGraphFromTable(Table *t, int type = 0);
@@ -256,9 +259,6 @@ public:
                MantidQt::DistributionFlag distr = MantidQt::DistributionDefault,
                bool errs = false, MultiLayer *plotWindow = nullptr);
 
-  void showSurfacePlot() override;
-  void showContourPlot() override;
-
 #ifdef MAKE_VATES
   bool doesVatesSupportOpenGL() override;
 #endif
@@ -287,13 +287,29 @@ public slots:
          bool errs = false,
          GraphOptions::CurveType style = GraphOptions::Unspecified,
          MultiLayer *plotWindow = NULL, bool clearWindow = false,
-         bool waterfallPlot = false);
+         bool waterfallPlot = false, const QString &log = "",
+         const std::set<double> &customLogValues = std::set<double>(),
+         const bool multipleSpectra = false);
 
   MultiLayer *
   plot1D(const QMultiMap<QString, std::set<int>> &toPlot, bool spectrumPlot,
          MantidQt::DistributionFlag distr = MantidQt::DistributionDefault,
          bool errs = false, MultiLayer *plotWindow = NULL,
-         bool clearWindow = false, bool waterfallPlot = false) override;
+         bool clearWindow = false, bool waterfallPlot = false,
+         const QString &log = "",
+         const std::set<double> &customLogValues = std::set<double>()) override;
+
+  /// Plot contour
+  void plotContour(bool accepted, int plotIndex, const QString &axisName,
+                   const QString &logName,
+                   const std::set<double> &customLogValues,
+                   const QList<QString> &workspaceNames) override;
+
+  /// Plot surface
+  void plotSurface(bool accepted, int plotIndex, const QString &axisName,
+                   const QString &logName,
+                   const std::set<double> &customLogValues,
+                   const QList<QString> &workspaceNames) override;
 
   /// Draw a color fill plot for each of the listed workspaces
   void drawColorFillPlots(
@@ -620,6 +636,12 @@ private:
                              const QString &wsName,
                              const std::set<int> &spectra);
 
+  /// Get log values and put into a curve spec list
+  void putLogsIntoCurveSpecs(
+      std::vector<CurveSpec> &curveSpecList,
+      const QMultiMap<QString, int> &toPlot, const QString &log,
+      const std::set<double> &customLogValues = std::set<double>());
+
   // Private variables
 
   ApplicationWindow *m_appWindow; // QtiPlot main ApplicationWindow
diff --git a/MantidPlot/src/ProjectSaveView.cpp b/MantidPlot/src/ProjectSaveView.cpp
index 5fe91179c2f067538859bb00e7416fea694ad113..74b030c511d3179978d067016775d01cf90256b1 100644
--- a/MantidPlot/src/ProjectSaveView.cpp
+++ b/MantidPlot/src/ProjectSaveView.cpp
@@ -205,6 +205,9 @@ void ProjectSaveView::save(bool checked) {
   emit projectSaved();
 
   close();
+  // Set the result code after calling close() because
+  // close() sets it to QDialog::Rejected
+  setResult(QDialog::Accepted);
 }
 
 /**
diff --git a/MantidPlot/test/squish_test_suites/refl_gui_tests/objects.map b/MantidPlot/test/squish_test_suites/refl_gui_tests/objects.map
index 768666ead13d4203ac6daba99d8f84c7bfbdc29e..00d8574e329ff5e80a884411875b6a779db9b9a7 100644
--- a/MantidPlot/test/squish_test_suites/refl_gui_tests/objects.map
+++ b/MantidPlot/test/squish_test_suites/refl_gui_tests/objects.map
@@ -2,6 +2,8 @@
 :2_HeaderViewItem	{container=':tableMain_QHeaderView' text='2' type='HeaderViewItem'}
 :Discard_QPushButton	{text='Discard' type='QPushButton' unnamed='1' visible='1' window=':_QMessageBox_2'}
 :Don't Save_QPushButton	{text='Don\\'t Save' type='QPushButton' unnamed='1' visible='1' window=':_QMessageBox_2'}
+:ISIS Reflectometry (Old).splitterList_QSplitter	{name='splitterList' type='QSplitter' visible='1' window=':ISIS Reflectometry (Old)_ReflGui'}
+:ISIS Reflectometry (Old)_ReflGui	{name='windowRefl' type='ReflGui' visible='1' windowTitle='ISIS Reflectometry (Old)'}
 :ISIS Reflectometry.File_QMenu	{name='menuFile' title='File' type='QMenu' visible='1' window=':ISIS Reflectometry_ReflGui'}
 :ISIS Reflectometry.Instrument:_QLabel	{name='labelInstrument' text='Instrument:' type='QLabel' visible='1' window=':ISIS Reflectometry_ReflGui'}
 :ISIS Reflectometry.menuBar_QMenuBar	{name='menuBar' type='QMenuBar' visible='1' window=':ISIS Reflectometry_ReflGui'}
@@ -24,11 +26,13 @@
 :_QMessageBox_2	{type='QMessageBox' unnamed='1' visible='1'}
 :comboInstrument_QComboBox	{buddy=':ISIS Reflectometry.Instrument:_QLabel' name='comboInstrument' type='QComboBox' visible='1'}
 :splitterList.Process_QPushButton	{container=':ISIS Reflectometry.splitterList_QSplitter' name='buttonProcess' text='Process' type='QPushButton' visible='1'}
+:splitterList.buttonProcess_QPushButton	{container=':ISIS Reflectometry (Old).splitterList_QSplitter' name='buttonProcess' text='Process' type='QPushButton' visible='1'}
 :splitterList.tableMain_QTableWidget	{container=':ISIS Reflectometry.splitterList_QSplitter' name='tableMain' type='QTableWidget' visible='1'}
+:splitterList.tableMain_QTableWidget_2	{container=':ISIS Reflectometry (Old).splitterList_QSplitter' name='tableMain' type='QTableWidget' visible='1'}
 :splitterList.widgetBottomRight_QWidget	{container=':ISIS Reflectometry.splitterList_QSplitter' name='widgetBottomRight' type='QWidget' visible='1'}
-:tableMain.Plot_QPushButton	{container=':splitterList.tableMain_QTableWidget' text='Plot' type='QPushButton' unnamed='1' visible='1'}
-:tableMain.Yes_QPushButton	{container=':splitterList.tableMain_QTableWidget' text='Yes' type='QPushButton' unnamed='1' visible='1'}
-:tableMain_QCheckBox	{container=':splitterList.tableMain_QTableWidget' type='QCheckBox' unnamed='1' visible='1'}
+:tableMain.Plot_QPushButton	{container=':splitterList.tableMain_QTableWidget_2' text='Plot' type='QPushButton' unnamed='1' visible='1'}
+:tableMain.Yes_QPushButton	{container=':splitterList.tableMain_QTableWidget_2' text='Yes' type='QPushButton' unnamed='1' visible='1'}
+:tableMain_QCheckBox	{container=':splitterList.tableMain_QTableWidget_2' type='QCheckBox' unnamed='1' visible='1'}
 :tableMain_QExpandingLineEdit	{columnIndex='0' container=':splitterList.tableMain_QTableWidget' rowIndex='0' type='QExpandingLineEdit' unnamed='1' visible='1'}
 :tableMain_QExpandingLineEdit_2	{columnIndex='1' container=':splitterList.tableMain_QTableWidget' rowIndex='0' type='QExpandingLineEdit' unnamed='1' visible='1'}
 :tableMain_QExpandingLineEdit_3	{columnIndex='2' container=':splitterList.tableMain_QTableWidget' rowIndex='0' type='QExpandingLineEdit' unnamed='1' visible='1'}
diff --git a/MantidPlot/test/squish_test_suites/refl_gui_tests/tst_basic_operation/test.py b/MantidPlot/test/squish_test_suites/refl_gui_tests/tst_basic_operation/test.py
index e173c2494ceb63fcba8f6221894223c340663cbc..292e0cc748b5e59cceb9cbf002fda7fdbc039a52 100644
--- a/MantidPlot/test/squish_test_suites/refl_gui_tests/tst_basic_operation/test.py
+++ b/MantidPlot/test/squish_test_suites/refl_gui_tests/tst_basic_operation/test.py
@@ -18,18 +18,18 @@ class ReflTestHarness:
     def __setup(self):
         activateItem(waitForObjectItem(":_QMenuBar", "Interfaces"))
         activateItem(waitForObjectItem(":MantidPlot - untitled.Interfaces_QMenu", "Reflectometry"))
-        refl_gui = waitForObjectItem(":Interfaces.Reflectometry_QMenu", "ISIS Reflectometry (Old)")
+        refl_gui = waitForObjectItem(":Interfaces.Reflectometry_QMenu", "ISIS Reflectometry Old")
         activateItem(refl_gui)
 
     def process_everything(self):
         # Hit process
-        clickButton(waitForObject(":splitterList.Process_QPushButton"))
+        clickButton(waitForObject(":splitterList.buttonProcess_QPushButton"))
         # Agree to process everything
         setWindowState(waitForObject(":_QMessageBox"), WindowState.Normal)
         clickButton(waitForObject(":tableMain.Yes_QPushButton"))
         
     def __teardown(self):
-        sendEvent("QCloseEvent", waitForObject(":ISIS Reflectometry_ReflGui"))
+        sendEvent("QCloseEvent", waitForObject(":ISIS Reflectometry (Old)_ReflGui"))
         dont_save_name = ":Don't Save_QPushButton"
         if(object.exists(dont_save_name)):
             clickButton(waitForObject(dont_save_name))
diff --git a/MantidQt/API/inc/MantidQtAPI/MantidQwtWorkspaceData.h b/MantidQt/API/inc/MantidQtAPI/MantidQwtWorkspaceData.h
index e6bf6801417f5c9f6c15a986d3d620ea57fd6feb..19aa809d348090bce5cab7de862e357b3613aa2c 100644
--- a/MantidQt/API/inc/MantidQtAPI/MantidQwtWorkspaceData.h
+++ b/MantidQt/API/inc/MantidQtAPI/MantidQwtWorkspaceData.h
@@ -53,19 +53,19 @@ public:
   virtual size_t esize() const;
   virtual double e(size_t i) const;
   virtual double ex(size_t i) const;
+  bool isPlottable() const;
   virtual void setLogScaleY(bool on);
   virtual bool logScaleY() const;
-  virtual void saveLowestPositiveValue(const double v);
+  void setMinimumPositiveValue(const double v);
   virtual double getYMin() const;
   virtual double getYMax() const;
-
   virtual void setXOffset(const double x);
   virtual void setYOffset(const double y);
   virtual void setWaterfallPlot(bool on);
   virtual bool isWaterfallPlot() const;
   double offsetY() const { return m_offsetY; }
 
-  void calculateYMinAndMax(/*const std::vector<double> &yvalues*/) const;
+  void calculateYMinAndMax() const;
 
 protected:
   virtual double getX(size_t i) const = 0;
@@ -74,6 +74,8 @@ protected:
   virtual double getEX(size_t i) const = 0;
 
 private:
+  enum class DataStatus : uint8_t { Undefined, NotPlottable, Plottable };
+
   /// Indicates that the data is plotted on a log y scale
   bool m_logScaleY;
 
@@ -86,6 +88,9 @@ private:
   /// highest y value
   mutable double m_maxY;
 
+  /// Status indicating whether the data is 'sensible' to plot
+  mutable DataStatus m_plottable;
+
   /// Indicates whether or not waterfall plots are enabled
   bool m_isWaterfall;
 
diff --git a/MantidQt/API/inc/MantidQtAPI/MdSettings.h b/MantidQt/API/inc/MantidQtAPI/MdSettings.h
index dd362ccf2e40eeec36d7d5c392933ecd6988c25e..0712462c47b27d9765d7d11a52c8680ecda4a39f 100644
--- a/MantidQt/API/inc/MantidQtAPI/MdSettings.h
+++ b/MantidQt/API/inc/MantidQtAPI/MdSettings.h
@@ -165,6 +165,9 @@ public:
    */
   bool getLastSessionLogScale();
 
+  bool getUserSettingAutoColorAxes();
+  void setUserSettingAutoColorAxes(bool autoScale);
+
 private:
   MdConstants m_mdConstants;
 
@@ -186,6 +189,7 @@ private:
 
   QString m_lblUserSettingInitialView;
   QString m_lblLastSessionLogScale;
+  QString m_lblUserSettingAutoColorAxes;
 };
 }
 }
diff --git a/MantidQt/API/inc/MantidQtAPI/QwtWorkspaceSpectrumData.h b/MantidQt/API/inc/MantidQtAPI/QwtWorkspaceSpectrumData.h
index 9bd9e55162297634c93ba61e80c935cfc13b7ca3..271e2ab4fc530ff9837e451e0d531003e4db5f42 100644
--- a/MantidQt/API/inc/MantidQtAPI/QwtWorkspaceSpectrumData.h
+++ b/MantidQt/API/inc/MantidQtAPI/QwtWorkspaceSpectrumData.h
@@ -51,8 +51,6 @@ public:
   /// Number of error bars to plot
   size_t esize() const override;
 
-  // double getYMin() const override;
-  // double getYMax() const override;
   /// Return the label to use for the X axis
   QString getXAxisLabel() const override;
   /// Return the label to use for the Y axis
@@ -61,17 +59,8 @@ public:
   bool isHistogram() const { return m_isHistogram; }
   bool dataIsNormalized() const { return m_dataIsNormalized; }
 
-  ///// Inform the data that it is to be plotted on a log y scale
-  // void setLogScale(bool on) override;
-  // bool logScaleY() const override { return m_logScaleY; }
-  // void saveLowestPositiveValue(const double v) override;
   bool setAsDistribution(bool on = true);
 
-  //// Sets offsets for and enables waterfall plots
-  // void setXOffset(const double x) override;
-  // void setYOffset(const double y) override;
-  // void setWaterfallPlot(bool on) override;
-
 protected:
   // Assignment operator (virtualized). MSVC not happy with compiler generated
   // one
diff --git a/MantidQt/API/src/MantidQwtIMDWorkspaceData.cpp b/MantidQt/API/src/MantidQwtIMDWorkspaceData.cpp
index 076e3c3a3ace055eddecc9f4587f117d5206d99c..04eabd9041b3abfb26d2410cd82d44eafac7b12d 100644
--- a/MantidQt/API/src/MantidQwtIMDWorkspaceData.cpp
+++ b/MantidQt/API/src/MantidQwtIMDWorkspaceData.cpp
@@ -159,7 +159,12 @@ void MantidQwtIMDWorkspaceData::calculateMinMax() { calculateYMinAndMax(); }
 //-----------------------------------------------------------------------------
 /** Size of the data set
  */
-size_t MantidQwtIMDWorkspaceData::size() const { return m_Y.size(); }
+size_t MantidQwtIMDWorkspaceData::size() const {
+  if (!isPlottable()) {
+    return 0;
+  }
+  return m_Y.size();
+}
 
 /** Return the x value of data point i
 @param i :: Index
@@ -199,7 +204,12 @@ double MantidQwtIMDWorkspaceData::getEX(size_t i) const {
 double MantidQwtIMDWorkspaceData::getE(size_t i) const { return m_E[i]; }
 
 /// Number of error bars to plot
-size_t MantidQwtIMDWorkspaceData::esize() const { return m_E.size(); }
+size_t MantidQwtIMDWorkspaceData::esize() const {
+  if (!isPlottable()) {
+    return 0;
+  }
+  return m_E.size();
+}
 
 bool MantidQwtIMDWorkspaceData::setAsDistribution(bool on) {
   m_isDistribution = on;
diff --git a/MantidQt/API/src/MantidQwtWorkspaceData.cpp b/MantidQt/API/src/MantidQwtWorkspaceData.cpp
index 7e450ded1b99a6fb408bb78a1a5b13fd22d9efe4..0913a30986af6adf4e9d347b5ab9449bd919d693 100644
--- a/MantidQt/API/src/MantidQwtWorkspaceData.cpp
+++ b/MantidQt/API/src/MantidQwtWorkspaceData.cpp
@@ -2,9 +2,19 @@
 
 #include <cmath>
 
+namespace {
+/// Fallback minimum positive value (used when no positive y values exist)
+constexpr double MIN_POSITIVE = 1e-3;
+/// Fallback maximum y value (used when no finite y values exist)
+constexpr double MAX_POSITIVE = 1e30;
+/// Arbitrary multiplier between min/max if they are equal
+constexpr double MIN_MAX_DELTA = 1.001;
+}
+
 MantidQwtWorkspaceData::MantidQwtWorkspaceData(bool logScaleY)
     : m_logScaleY(logScaleY), m_minY(0), m_minPositive(0), m_maxY(0),
-      m_isWaterfall(false), m_offsetX(0), m_offsetY(0) {}
+      m_plottable(DataStatus::Undefined), m_isWaterfall(false), m_offsetX(0),
+      m_offsetY(0) {}
 
 MantidQwtWorkspaceData::MantidQwtWorkspaceData(
     const MantidQwtWorkspaceData &data) {
@@ -18,6 +28,7 @@ operator=(const MantidQwtWorkspaceData &data) {
   m_minY = data.m_minY;
   m_minPositive = data.m_minPositive;
   m_maxY = data.m_maxY;
+  m_plottable = data.m_plottable;
   m_isWaterfall = data.m_isWaterfall;
   m_offsetX = data.m_offsetX;
   m_offsetY = data.m_offsetY;
@@ -27,11 +38,16 @@ operator=(const MantidQwtWorkspaceData &data) {
 /// Calculate absolute minimum and maximum values in a vector. Also find the
 /// smallest positive value.
 void MantidQwtWorkspaceData::calculateYMinAndMax() const {
-
-  const double maxDouble = std::numeric_limits<double>::max();
-  double curMin = maxDouble;
-  double curMinPos = maxDouble;
-  double curMax = -maxDouble;
+  // Mark the data as plottable so that size() reports the "real" data size.
+  // Its correct value is then recalculated below. This is not ideal, but a
+  // big refactor is not worth it given the new workbench/plotting
+  // developments.
+  m_plottable = DataStatus::Plottable;
+  m_minY = m_maxY = m_minPositive = 0.0;
+
+  double ymin(std::numeric_limits<double>::max()),
+      ymax(-std::numeric_limits<double>::max()),
+      yminPos(std::numeric_limits<double>::max());
   for (size_t i = 0; i < size(); ++i) {
     auto val = y(i);
     // skip NaNs
@@ -39,41 +55,47 @@ void MantidQwtWorkspaceData::calculateYMinAndMax() const {
       continue;
 
     // Update our values as appropriate
-    if (val < curMin)
-      curMin = val;
-    if (val < curMinPos && val > 0)
-      curMinPos = val;
-    if (val > curMax)
-      curMax = val;
-  }
-
-  // Save the results
-  if (curMin == maxDouble) {
-    m_minY = 0.0;
-    m_minPositive = 0.1;
-    m_maxY = 1.0;
-    return;
-  } else {
-    m_minY = curMin;
-  }
-
-  if (curMax == curMin) {
-    curMax *= 1.1;
+    if (val < ymin)
+      ymin = val;
+    if (val > 0.0 && val < yminPos)
+      yminPos = val;
+    if (val > ymax)
+      ymax = val;
   }
-  m_maxY = curMax;
 
-  if (curMinPos == maxDouble) {
-    m_minPositive = 0.1;
+  if (ymin < std::numeric_limits<double>::max()) {
+    // Values are sensible range
+    m_minY = ymin;
+    // Ensure there is a difference between max and min
+    m_maxY = (ymax != ymin) ? ymax : ymin * MIN_MAX_DELTA;
+
+    // Minimum positive value is kept for log scales
+    if (yminPos < std::numeric_limits<double>::max()) {
+      m_minPositive = yminPos;
+      m_plottable = DataStatus::Plottable;
+    } else {
+      // All values are <= 0
+      m_minPositive = MIN_POSITIVE;
+      m_plottable =
+          logScaleY() ? DataStatus::NotPlottable : DataStatus::Plottable;
+    }
   } else {
-    m_minPositive = curMinPos;
+    // Set to arbitrary values (this is unlikely to happen)
+    m_minY = 0.0;
+    m_maxY = MAX_POSITIVE;
+    m_minPositive = MIN_POSITIVE;
+    m_plottable = DataStatus::NotPlottable;
   }
 }
 
-void MantidQwtWorkspaceData::setLogScaleY(bool on) { m_logScaleY = on; }
+void MantidQwtWorkspaceData::setLogScaleY(bool on) {
+  m_logScaleY = on;
+  calculateYMinAndMax();
+}
 
 bool MantidQwtWorkspaceData::logScaleY() const { return m_logScaleY; }
 
-void MantidQwtWorkspaceData::saveLowestPositiveValue(const double v) {
+void MantidQwtWorkspaceData::setMinimumPositiveValue(const double v) {
   if (v > 0)
     m_minPositive = v;
 }
@@ -91,7 +113,7 @@ bool MantidQwtWorkspaceData::isWaterfallPlot() const { return m_isWaterfall; }
  * @return the lowest y value.
  */
 double MantidQwtWorkspaceData::getYMin() const {
-  if (m_minPositive == 0.0) {
+  if (m_plottable == DataStatus::Undefined) {
     calculateYMinAndMax();
   }
   return m_logScaleY ? m_minPositive : m_minY;
@@ -102,7 +124,7 @@ double MantidQwtWorkspaceData::getYMin() const {
  * @return the highest y value.
  */
 double MantidQwtWorkspaceData::getYMax() const {
-  if (m_minPositive == 0.0) {
+  if (m_plottable == DataStatus::Undefined) {
     calculateYMinAndMax();
   }
   if (m_logScaleY && m_maxY <= 0)
@@ -121,7 +143,12 @@ double MantidQwtWorkspaceData::y(size_t i) const {
   return tmp;
 }
 
-size_t MantidQwtWorkspaceData::esize() const { return this->size(); }
+size_t MantidQwtWorkspaceData::esize() const {
+  if (!isPlottable()) {
+    return 0;
+  }
+  return this->size();
+}
 
 double MantidQwtWorkspaceData::e(size_t i) const {
   double ei = getE(i);
@@ -137,5 +164,19 @@ double MantidQwtWorkspaceData::e(size_t i) const {
 
 double MantidQwtWorkspaceData::ex(size_t i) const { return getEX(i); }
 
+/**
+ * @brief MantidQwtWorkspaceData::isPlottable
+ * Data is considered plottable if either:
+ *   - scale != log, or
+ *   - scale == log and the data contain at least one y > 0.0
+ * @return True if the data is considered plottable, false otherwise
+ */
+bool MantidQwtWorkspaceData::isPlottable() const {
+  return (m_plottable == DataStatus::Plottable);
+}
+
+//------------------------------------------------------------------------------
+// MantidQwtMatrixWorkspaceData class
+//------------------------------------------------------------------------------
 MantidQwtMatrixWorkspaceData::MantidQwtMatrixWorkspaceData(bool logScaleY)
     : MantidQwtWorkspaceData(logScaleY) {}
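
The reworked calculateYMinAndMax above now drives both the axis autoscaling values and the new plottability status. A standalone sketch of the same bookkeeping (illustrative names; the constants mirror MIN_POSITIVE/MIN_MAX_DELTA from the patch, and this is not the Mantid class itself):

    #include <algorithm>
    #include <cmath>
    #include <limits>
    #include <vector>

    // Illustrative re-statement of the y-range bookkeeping above.
    struct YRange {
      double minY = 0.0, maxY = 0.0, minPositive = 0.0;
      bool plottable = false;
    };

    YRange computeYRange(const std::vector<double> &y, bool logScaleY) {
      constexpr double minPositiveFallback = 1e-3;
      constexpr double minMaxDelta = 1.001;
      double ymin = std::numeric_limits<double>::max();
      double ymax = -std::numeric_limits<double>::max();
      double yminPos = std::numeric_limits<double>::max();
      for (const double val : y) {
        if (std::isnan(val))
          continue; // skip NaNs, as the patch does
        ymin = std::min(ymin, val);
        ymax = std::max(ymax, val);
        if (val > 0.0)
          yminPos = std::min(yminPos, val);
      }
      YRange r;
      if (ymin < std::numeric_limits<double>::max()) {
        r.minY = ymin;
        // Ensure max differs from min so autoscaling has a non-empty range
        r.maxY = (ymax != ymin) ? ymax : ymin * minMaxDelta;
        if (yminPos < std::numeric_limits<double>::max()) {
          r.minPositive = yminPos; // at least one positive value exists
          r.plottable = true;
        } else {
          r.minPositive = minPositiveFallback; // all values <= 0
          r.plottable = !logScaleY;            // only a log axis refuses such data
        }
      } else {
        // No finite values at all: arbitrary defaults, nothing to plot
        r.minPositive = minPositiveFallback;
        r.maxY = 1e30;
        r.plottable = false;
      }
      return r;
    }

On a linear axis the data stay plottable even when every value is non-positive; only a log axis marks such data as not plottable, which is what lets size()/esize() return 0 and suppress the curve.
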
diff --git a/MantidQt/API/src/MdSettings.cpp b/MantidQt/API/src/MdSettings.cpp
index a7a6f3c58fd3b49f5f7be0b5115968abdf9ed6c9..1eca8833de4edcc23f1c40281ad152fe06e966f7 100644
--- a/MantidQt/API/src/MdSettings.cpp
+++ b/MantidQt/API/src/MdSettings.cpp
@@ -22,7 +22,10 @@ MdSettings::MdSettings()
       m_lblSliceViewerColorMap(
           "ColormapFile"), // This is the same as in Slice Viewer !!,
       m_lblUserSettingInitialView("initialview"),
-      m_lblLastSessionLogScale("lastsessionlogscale") {
+      m_lblLastSessionLogScale("lastsessionlogscale"),
+      m_lblUserSettingAutoColorAxes("usersettingautocoloraxes")
+
+{
   m_mdConstants.initializeSettingsConstants();
 }
 
@@ -215,4 +218,23 @@ void MdSettings::setUserSettingIntialView(QString initialView) {
   settings.beginGroup(m_vsiGroup);
   settings.setValue(m_lblUserSettingInitialView, initialView);
   settings.endGroup();
-}
\ No newline at end of file
+}
+
+bool MdSettings::getUserSettingAutoColorAxes() {
+  QSettings settings;
+
+  settings.beginGroup(m_vsiGroup);
+  bool autoColor =
+      settings.value(m_lblUserSettingAutoColorAxes, false).toBool();
+  settings.endGroup();
+
+  return autoColor;
+}
+
+void MdSettings::setUserSettingAutoColorAxes(bool autoColor) {
+  QSettings settings;
+
+  settings.beginGroup(m_vsiGroup);
+  settings.setValue(m_lblUserSettingAutoColorAxes, autoColor);
+  settings.endGroup();
+}
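
The new auto-colour-axes accessors follow the usual QSettings group pattern: open the group, read or write the key, close the group. A minimal sketch of that pattern (hypothetical helper names; requires Qt):

    #include <QSettings>
    #include <QString>

    // Hypothetical helpers illustrating the read/write pattern used by the new
    // accessors: begin the group, access the value, end the group.
    bool readBoolSetting(const QString &group, const QString &key,
                         bool defaultValue = false) {
      QSettings settings;
      settings.beginGroup(group);
      const bool value = settings.value(key, defaultValue).toBool();
      settings.endGroup();
      return value;
    }

    void writeBoolSetting(const QString &group, const QString &key, bool value) {
      QSettings settings;
      settings.beginGroup(group);
      settings.setValue(key, value);
      settings.endGroup();
    }
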
diff --git a/MantidQt/API/src/QwtWorkspaceBinData.cpp b/MantidQt/API/src/QwtWorkspaceBinData.cpp
index 1408a915bf058796e1eab73bbab25a4042fc90eb..fff267417f87ed37bc4f7df67df7fe0566875c70 100644
--- a/MantidQt/API/src/QwtWorkspaceBinData.cpp
+++ b/MantidQt/API/src/QwtWorkspaceBinData.cpp
@@ -32,7 +32,12 @@ QwtWorkspaceBinData *QwtWorkspaceBinData::copyWithNewSource(
 
 /** Size of the data set
  */
-size_t QwtWorkspaceBinData::size() const { return m_Y.size(); }
+size_t QwtWorkspaceBinData::size() const {
+  if (!isPlottable()) {
+    return 0;
+  }
+  return m_Y.size();
+}
 
 /**
 Return the x value of data point i
@@ -71,7 +76,6 @@ QString QwtWorkspaceBinData::getYAxisLabel() const { return m_yTitle; }
 QwtWorkspaceBinData &QwtWorkspaceBinData::
 operator=(const QwtWorkspaceBinData &rhs) {
   if (this != &rhs) {
-    static_cast<MantidQwtMatrixWorkspaceData &>(*this) = rhs;
     m_binIndex = rhs.m_binIndex;
     m_X = rhs.m_X;
     m_Y = rhs.m_Y;
diff --git a/MantidQt/API/src/QwtWorkspaceSpectrumData.cpp b/MantidQt/API/src/QwtWorkspaceSpectrumData.cpp
index 1c1af2e4584898088869fbac26698cdae9117657..b0f92cfd1bd488120eabcd74f9b71a9d3a473179 100644
--- a/MantidQt/API/src/QwtWorkspaceSpectrumData.cpp
+++ b/MantidQt/API/src/QwtWorkspaceSpectrumData.cpp
@@ -53,6 +53,9 @@ QwtWorkspaceSpectrumData *QwtWorkspaceSpectrumData::copyWithNewSource(
 /** Size of the data set
  */
 size_t QwtWorkspaceSpectrumData::size() const {
+  if (!isPlottable()) {
+    return 0;
+  }
   if (m_binCentres || m_isHistogram) {
     return m_Y.size();
   }
@@ -94,7 +97,12 @@ double QwtWorkspaceSpectrumData::getE(size_t i) const {
   return ei;
 }
 
-size_t QwtWorkspaceSpectrumData::esize() const { return m_E.size(); }
+size_t QwtWorkspaceSpectrumData::esize() const {
+  if (!isPlottable()) {
+    return 0;
+  }
+  return m_E.size();
+}
 
 /**
 * @return A string containing the text to use as an X axis label
@@ -121,7 +129,6 @@ bool QwtWorkspaceSpectrumData::setAsDistribution(bool on) {
 QwtWorkspaceSpectrumData &QwtWorkspaceSpectrumData::
 operator=(const QwtWorkspaceSpectrumData &rhs) {
   if (this != &rhs) {
-    static_cast<MantidQwtMatrixWorkspaceData &>(*this) = rhs;
     m_wsIndex = rhs.m_wsIndex;
     m_X = rhs.m_X;
     m_Y = rhs.m_Y;
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.h
index 04ffa09f590186cb84dc64a5d243f5b90d80288f..98395d7c36a65aeeb3d99d8d8af2438068cdc78e 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.h
@@ -55,9 +55,9 @@ private:
   double m_backScaling = 1.;
   double m_peakRange[2];
   int m_pixelRange[2];
+  std::string m_suffix;
   void save();
   void plot();
-  void convertTo2Theta();
 };
 } // namespace CustomInterfaces
 } // namespace Mantid
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.ui b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.ui
index 5a6f59d938ebe022efe069ad21db8305209cfe9b..1d349951b7f0267c05b39aeac05060c268230ae7 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.ui
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.ui
@@ -53,7 +53,7 @@
          <string>Input files</string>
         </property>
         <property name="label" stdset="0">
-         <string>Run File</string>
+         <string>Sample Run</string>
         </property>
         <property name="multipleFiles" stdset="0">
          <bool>true</bool>
@@ -467,7 +467,7 @@
          <string>IndirectILLReductionQENS|BackgroundRun</string>
         </property>
         <property name="label" stdset="0">
-         <string>Background File</string>
+         <string>Empty Container Run</string>
         </property>
         <property name="multipleFiles" stdset="0">
          <bool>true</bool>
@@ -556,7 +556,7 @@
          <string>Calibration files (vanadium)</string>
         </property>
         <property name="label" stdset="0">
-         <string>Calibration File </string>
+         <string>Vanadium Run</string>
         </property>
         <property name="algorithmAndProperty" stdset="0">
          <string>IndirectILLReductionQENS|CalibrationRun</string>
@@ -696,7 +696,7 @@
          <string>Plot the reduced workspace</string>
         </property>
         <property name="text">
-         <string>Plot Result</string>
+         <string>Plot</string>
         </property>
        </widget>
       </item>
@@ -746,15 +746,39 @@
        </widget>
       </item>
       <item>
-       <widget class="QCheckBox" name="ck2Theta">
-        <property name="toolTip">
-         <string>Convert the y-axis of the output also to 2theta.</string>
-        </property>
+       <widget class="QLabel" name="lbSpectrumAxis">
         <property name="text">
-         <string>Convert to 2Theta</string>
+         <string>Spectrum Axis:</string>
         </property>
        </widget>
       </item>
+      <item>
+       <widget class="QComboBox" name="cbSpectrumTarget">
+        <property name="toolTip">
+         <string>Choose the target for spectrum axis.</string>
+        </property>
+        <item>
+         <property name="text">
+          <string>SpectrumNumber</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>2Theta</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>Q</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>Q2</string>
+         </property>
+        </item>
+       </widget>
+      </item>
       <item>
        <widget class="Line" name="line_2">
         <property name="orientation">
@@ -774,7 +798,7 @@
          <string>Save the reduced workspace</string>
         </property>
         <property name="text">
-         <string>Save Result</string>
+         <string>Save</string>
         </property>
        </widget>
       </item>
@@ -1288,7 +1312,7 @@
   </connection>
  </connections>
  <buttongroups>
-  <buttongroup name="buttonGroup_2"/>
   <buttongroup name="buttonGroup"/>
+  <buttongroup name="buttonGroup_2"/>
  </buttongroups>
 </ui>
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysis.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysis.h
index cba60e4209dae75e4f7f4d68698c1007c0b17e6b..cd1a37e8bebf3ce4b291169a6c5cb8c480421c70 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysis.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysis.h
@@ -101,7 +101,10 @@ signals:
 private slots:
   /// Guess Alpha clicked
   void guessAlphaClicked();
-
+  void handleGroupBox();
+  void handlePeriodBox();
+  void setChosenGroupSlot(QString &group);
+  void setChosenPeriodSlot(QString &period);
   /// Checks whether two specified periods are equal and, if they are, sets
   /// second one to None
   void checkForEqualPeriods();
@@ -334,6 +337,9 @@ private:
   /// Plots specific WS spectrum (used by plotPair and plotGroup)
   void plotSpectrum(const QString &wsName, bool logScale = false);
 
+  /// set labels for a single data set
+  void updateLabels(std::string &name);
+
   /// Get current plot style parameters. wsName and wsIndex are used to get
   /// default values if
   /// something is not specified
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysisFitDataPresenter.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysisFitDataPresenter.h
index 15fc9737389bf3d2985d78bba7f765a8a7cc8fc7..5dc36a42339d35148192c74a3947e3600670c763 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysisFitDataPresenter.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysisFitDataPresenter.h
@@ -120,7 +120,11 @@ public:
   void setOverwrite(bool enabled) { m_overwrite = enabled; }
   /// Updates label to avoid overwriting existing results
   void checkAndUpdateFitLabel(bool sequentialFit);
-
+  /// Generate names of workspaces to be created
+  std::vector<std::string> generateWorkspaceNames(bool overwrite) const;
+signals:
+  void setChosenGroupSignal(const QString &group);
+  void setChosenPeriodSignal(const QString &period);
 public slots:
   /// Transforms fit results when a simultaneous fit finishes
   void handleFitFinished(const QString &status = QString("success")) const;
@@ -140,8 +144,6 @@ public slots:
   void doPreFitChecks(bool sequentialFit);
 
 private:
-  /// Generate names of workspaces to be created
-  std::vector<std::string> generateWorkspaceNames(bool overwrite) const;
   /// Create analysis workspace
   Mantid::API::Workspace_sptr createWorkspace(const std::string &name,
                                               std::string &groupLabel) const;
@@ -199,4 +201,4 @@ private:
 } // namespace CustomInterfaces
 } // namespace Mantid
 
-#endif /* MANTID_CUSTOMINTERFACES_MUONANALYSISFITDATAPRESENTER_H_ */
\ No newline at end of file
+#endif /* MANTID_CUSTOMINTERFACES_MUONANALYSISFITDATAPRESENTER_H_ */
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/IReflEventView.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/IReflEventView.h
index f04ffc9b5a281d107f50b08d2a376d72e8a31193..f4b9b584e96552ad4bc9384e29182299c05645ef 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/IReflEventView.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/IReflEventView.h
@@ -46,6 +46,9 @@ public:
   /// Returns the presenter managing this view
   virtual IReflEventPresenter *getPresenter() const = 0;
 
+  /// Slice type enums
+  enum class SliceType { UniformEven, Uniform, Custom, LogValue };
+
   virtual std::string getTimeSlicingValues() const = 0;
   virtual std::string getTimeSlicingType() const = 0;
 };
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/QtReflEventView.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/QtReflEventView.h
index 6b4e98a8594238cfb07df3d1d0c36e2e02061b1c..a69f90912cfccc072fb29e01ec5263cf1acff5fe 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/QtReflEventView.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/QtReflEventView.h
@@ -64,7 +64,9 @@ private:
   std::unique_ptr<IReflEventPresenter> m_presenter;
 
   /// Current slice type
-  mutable std::string m_sliceType;
+  mutable SliceType m_sliceType;
+  /// Slice type to string conversion map
+  std::map<SliceType, std::string> m_sliceTypeMap;
 
   /// List of radio buttons
   std::vector<QRadioButton *> m_buttonList;
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflDataProcessorPresenter.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflDataProcessorPresenter.h
index fef415c81f56581339e5ab0e5a98c293fbbe9219..f8517f21576737a415ccb0757839bf7527c2622b 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflDataProcessorPresenter.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflDataProcessorPresenter.h
@@ -91,14 +91,21 @@ private:
   void parseCustom(const std::string &timeSlicing,
                    std::vector<double> &startTimes,
                    std::vector<double> &stopTimes);
+  // Parse log value slicing and filter from input string
+  void parseLogValue(const std::string &inputStr, std::string &logFilter,
+                     std::vector<double> &minValues,
+                     std::vector<double> &maxValues);
 
   // Load a run as event workspace
   bool loadEventRun(const std::string &runNo);
   // Load a run (non-event workspace)
   void loadNonEventRun(const std::string &runNo);
+
   // Take a slice from event workspace
   std::string takeSlice(const std::string &runNo, size_t sliceIndex,
-                        double startTime, double stopTime);
+                        double startTime, double stopTime,
+                        const std::string &logFilter = "");
+
   // Asks user if they wish to proceed if a type of workspace exists in the ADS
   bool proceedIfWSTypeInADS(const MantidQt::MantidWidgets::TreeData &data,
                             const bool findEventWS);
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflEventWidget.ui b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflEventWidget.ui
index ec8c55a209f457db248a6c5f91fabb6752abdee7..3f540e2646ba770c64d12ce6564f1dad276c7aad 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflEventWidget.ui
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflEventWidget.ui
@@ -129,7 +129,7 @@
             <item>
               <widget class="QLabel" name="customLabel">
                 <property name="text">
-                  <string> Python list (sec)</string>
+                  <string>Python list (sec)</string>
                 </property>
               </widget>
             </item>
@@ -159,6 +159,69 @@
           </layout>
         </widget>
       </item>
+      <item>
+        <widget class="QGroupBox" name="logValueGroup">
+          <property name="title">
+            <string>Slicing by log value</string>
+          </property>
+          <layout class="QHBoxLayout" name="logValueLayout">
+            <item>
+              <widget class="QRadioButton" name="logValueButton">
+                <attribute name="buttonGroup">
+                  <string notr="true">slicingOptionsButtonGroup</string>
+                </attribute>
+              </widget>
+            </item>
+            <item>
+              <widget class="QLabel" name="logValueLabel">
+                <property name="text">
+                  <string>Python list (sec)</string>
+                </property>
+              </widget>
+            </item>
+            <item>
+              <widget class="QLineEdit" name="logValueEdit">
+                <property name="sizeHint" stdset="0">
+                  <size>
+                    <width>10</width>
+                    <height>10</height>
+                  </size>
+                </property>
+              </widget>
+            </item>
+            <item>
+              <widget class="QLabel" name="logValueTypeLabel">
+                <property name="text">
+                  <string>Log name</string>
+                </property>
+              </widget>
+            </item>
+            <item>
+              <widget class="QLineEdit" name="logValueTypeEdit">
+                <property name="sizeHint" stdset="0">
+                  <size>
+                    <width>10</width>
+                    <height>10</height>
+                  </size>
+                </property>
+              </widget>
+            </item>
+            <item>
+              <spacer name="logValueSpacer1">
+                <property name="orientation">
+                  <enum>Qt::Horizontal</enum>
+                </property>
+                <property name="sizeHint" stdset="0">
+                  <size>
+                    <width>20</width>
+                    <height>20</height>
+                  </size>
+                </property>
+              </spacer>
+            </item>
+          </layout>
+        </widget>
+      </item>
       <item>
         <spacer name="verticalSpacer">
           <property name="orientation">
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/SANSRunWindow.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/SANSRunWindow.h
index 2d6fa9ab13fde2862a0e6bf2dfeb08f9ab2669bb..7c1bd882acd50dc2c1b14150a33c187e745779cb 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/SANSRunWindow.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/SANSRunWindow.h
@@ -514,6 +514,10 @@ private:
   bool isValidUserFile();
   /// Update IDF file path
   void updateIDFFilePath();
+  /// Update IDF file path when running in Batch mode
+  void updateIDFFilePathForBatch();
+  /// Update IDF information
+  void updateIDFInfo(const QString &command);
 
   UserSubWindow *slicingWindow;
 };
diff --git a/MantidQt/CustomInterfaces/src/Indirect/ILLEnergyTransfer.cpp b/MantidQt/CustomInterfaces/src/Indirect/ILLEnergyTransfer.cpp
index 821b09c0e9bd40e643f8776b79870999ed2b5b86..93ccd6d290390efc46986dc6e1842d1c4182c024 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/ILLEnergyTransfer.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/ILLEnergyTransfer.cpp
@@ -232,6 +232,20 @@ void ILLEnergyTransfer::run() {
   reductionAlg->setProperty("Reflection",
                             instDetails["reflection"].toStdString());
 
+  std::string target = m_uiForm.cbSpectrumTarget->currentText().toStdString();
+  reductionAlg->setProperty("SpectrumAxis", target);
+
+  // Keep track of the suffix
+  if (target == "SpectrumNumber") {
+    m_suffix = "_red";
+  } else if (target == "2Theta") {
+    m_suffix = "_2theta";
+  } else if (target == "Q") {
+    m_suffix = "_q";
+  } else if (target == "Q2") {
+    m_suffix = "_q2";
+  }
+
   // Handle mapping file
   bool useMapFile = m_uiForm.rdGroupChoose->isChecked();
   if (useMapFile) {
@@ -269,9 +283,6 @@ void ILLEnergyTransfer::algorithmComplete(bool error) {
     if (m_uiForm.ckPlot->isChecked()) {
       plot();
     }
-    if (m_uiForm.ck2Theta->isChecked()) {
-      convertTo2Theta();
-    }
   }
 
   // Nothing to do here
@@ -285,7 +296,8 @@ void ILLEnergyTransfer::plot() {
                     "from IndirectReductionCommon import plot_reduction\n";
   pyInput += "plot_reduction(mtd[\"";
   pyInput += m_uiForm.leOutWS->text();
-  pyInput += "_red\"].getItem(0).getName(),\"Contour\")\n";
+  pyInput += QString::fromStdString(m_suffix);
+  pyInput += "\"].getItem(0).getName(),\"Contour\")\n";
   m_pythonRunner.runPythonCode(pyInput);
 }
 
@@ -296,23 +308,11 @@ void ILLEnergyTransfer::save() {
   QString pyInput;
   pyInput += "SaveNexusProcessed(\"";
   pyInput += m_uiForm.leOutWS->text();
-  pyInput += "_red\",\"";
+  pyInput += QString::fromStdString(m_suffix);
+  pyInput += "\",\"";
   pyInput += m_uiForm.leOutWS->text();
-  pyInput += "_red.nxs\")\n";
-  m_pythonRunner.runPythonCode(pyInput);
-}
-
-/**
- * Handles the conversion of y-axis to 2theta
- */
-void ILLEnergyTransfer::convertTo2Theta() {
-  QString pyInput;
-  QString inputWS = m_uiForm.leOutWS->text();
-  pyInput += "ConvertSpectrumAxis(InputWorkspace=\"";
-  pyInput += inputWS;
-  pyInput += "_red\",EMode=\"Indirect\",Target=\"Theta\",OutputWorkspace=\"";
-  pyInput += inputWS;
-  pyInput += "_2theta\")\n";
+  pyInput += QString::fromStdString(m_suffix);
+  pyInput += ".nxs\")\n";
   m_pythonRunner.runPythonCode(pyInput);
 }
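
The run() change above records the suffix that matches the chosen spectrum-axis target, and plot()/save() then act on that output workspace. A self-contained sketch of the flow (plain std::string instead of QString; helper names are illustrative, not part of the patch):

    #include <map>
    #include <string>

    // Illustrative lookup of the output-workspace suffix by spectrum-axis target,
    // mirroring the if/else chain in ILLEnergyTransfer::run().
    std::string suffixForTarget(const std::string &target) {
      static const std::map<std::string, std::string> suffixes = {
          {"SpectrumNumber", "_red"},
          {"2Theta", "_2theta"},
          {"Q", "_q"},
          {"Q2", "_q2"}};
      const auto it = suffixes.find(target);
      return it != suffixes.end() ? it->second : "_red"; // fall back to "_red"
    }

    // Compose the SaveNexusProcessed command emitted by save() for that suffix.
    std::string buildSaveCommand(const std::string &outputBase,
                                 const std::string &target) {
      const std::string ws = outputBase + suffixForTarget(target);
      return "SaveNexusProcessed(\"" + ws + "\",\"" + ws + ".nxs\")\n";
    }

For a target of "2Theta" and an output name "out", the helper yields SaveNexusProcessed("out_2theta","out_2theta.nxs"), matching the Python command assembled in save().
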
 
diff --git a/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp b/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp
index 08dc707c1441ec441e735c6b4180fe5396e7851d..07c65385b2aea9e7a8a8283d555b97eb132a730f 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp
@@ -191,8 +191,7 @@ void IndirectDiffractionReduction::plotResults() {
       const auto workspaceExists =
           AnalysisDataService::Instance().doesExist(it);
       if (workspaceExists)
-        pyInput += "plotSpectrum('" + QString::fromStdString(it) +
-                   "', 0, error_bars = True)\n";
+        pyInput += "plotSpectrum('" + QString::fromStdString(it) + "', 0)\n";
       else
         showInformationBox(QString::fromStdString(
             "Workspace '" + it + "' not found\nUnable to plot workspace"));
diff --git a/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp b/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp
index 6137629fa50bfd24b3072c238fbf5493789322e0..011e992ca85117dca7e1a6f447fdcccaa065b9eb 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp
@@ -259,7 +259,7 @@ void IndirectTab::plotSpectrum(const QStringList &workspaceNames, int wsIndex) {
   pyInput += workspaceNames.join("','");
   pyInput += "'], ";
   pyInput += QString::number(wsIndex);
-  pyInput += ", error_bars = True)\n";
+  pyInput += ")\n";
 
   m_pythonRunner.runPythonCode(pyInput);
 }
@@ -303,7 +303,7 @@ void IndirectTab::plotSpectrum(const QStringList &workspaceNames, int specStart,
   pyInput += QString::number(specStart);
   pyInput += ",";
   pyInput += QString::number(specEnd + 1);
-  pyInput += ")), error_bars = True)\n";
+  pyInput += ")))\n";
 
   m_pythonRunner.runPythonCode(pyInput);
 }
@@ -355,7 +355,7 @@ void IndirectTab::plotSpectra(const QStringList &workspaceNames,
     pyInput += " ,";
     pyInput += QString::number(wsIndices[i]);
   }
-  pyInput += "], error_bars = True)\n";
+  pyInput += "])\n";
   m_pythonRunner.runPythonCode(pyInput);
 }
 
@@ -418,7 +418,7 @@ void IndirectTab::plotTimeBin(const QStringList &workspaceNames, int binIndex) {
   pyInput += workspaceNames.join("','");
   pyInput += "'], ";
   pyInput += QString::number(binIndex);
-  pyInput += ", error_bars=True)\n";
+  pyInput += ")\n";
 
   m_pythonRunner.runPythonCode(pyInput);
 }
diff --git a/MantidQt/CustomInterfaces/src/Indirect/IqtFit.cpp b/MantidQt/CustomInterfaces/src/Indirect/IqtFit.cpp
index ebb15944ddbfeb597eb52297081d508b2c8efb05..c51cea3e7cdea0eabf77d15a7a129d5ac8f62e6b 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/IqtFit.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/IqtFit.cpp
@@ -730,7 +730,7 @@ void IqtFit::constrainIntensities(CompositeFunction_sptr func) {
   switch (m_uiForm.cbFitType->currentIndex()) {
   case 0: // 1 Exp
   case 2: // 1 Str
-    if (!func->isFixed(index)) {
+    if (func->isActive(index)) {
       func->tie(paramName, "1-f0.A0");
     } else {
       std::string paramValue =
@@ -741,7 +741,7 @@ void IqtFit::constrainIntensities(CompositeFunction_sptr func) {
     break;
   case 1: // 2 Exp
   case 3: // 1 Exp & 1 Str
-    if (!func->isFixed(index)) {
+    if (func->isActive(index)) {
       func->tie(paramName, "1-f2.Height-f0.A0");
     } else {
       std::string paramValue =
diff --git a/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp b/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp
index ec02f8c16bb24ae20e1dc556c80f964fbc80c75b..a3a8758ae40a38e78356e45513e40d79ccf597e8 100644
--- a/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp
+++ b/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp
@@ -318,6 +318,17 @@ void MuonAnalysis::initLayout() {
   // Manage User Directories
   connect(m_uiForm.manageDirectoriesBtn, SIGNAL(clicked()), this,
           SLOT(openDirectoryDialog()));
+  connect(this, SIGNAL(setChosenGroupSignal(QString &)), this,
+          SLOT(setChosenGroupSlot(QString &)));
+  connect(this, SIGNAL(setChosenPeriodSignal(QString &)), this,
+          SLOT(setChosenPeriodSlot(QString &)));
+}
+
+void MuonAnalysis::setChosenGroupSlot(QString &group) {
+  m_uiForm.fitBrowser->setChosenGroup(group);
+}
+void MuonAnalysis::setChosenPeriodSlot(QString &period) {
+  m_uiForm.fitBrowser->setChosenPeriods(period);
 }
 
 /**
@@ -398,7 +409,9 @@ void MuonAnalysis::plotSelectedItem() {
 void MuonAnalysis::plotItem(ItemType itemType, int tableRow,
                             PlotType plotType) {
   m_updating = true;
-  m_dataSelector->clearChosenGroups();
+  m_uiForm.fitBrowser->clearChosenGroups();
+  m_uiForm.fitBrowser->clearChosenPeriods();
+
   AnalysisDataServiceImpl &ads = AnalysisDataService::Instance();
 
   try {
@@ -1844,8 +1857,8 @@ void MuonAnalysis::selectMultiPeak(const QString &wsName,
                    std::back_inserter(groupsAndPairs), &QString::fromStdString);
     std::transform(groups.pairNames.begin(), groups.pairNames.end(),
                    std::back_inserter(groupsAndPairs), &QString::fromStdString);
-    m_dataSelector->setAvailableGroups(groupsAndPairs);
-    m_dataSelector->setNumPeriods(m_numPeriods);
+    m_uiForm.fitBrowser->setAvailableGroups(groupsAndPairs);
+    m_uiForm.fitBrowser->setNumPeriods(m_numPeriods);
 
     // Set the selected run, group/pair and period
     m_fitDataPresenter->setAssignedFirstRun(wsName, filePath);
@@ -2126,6 +2139,11 @@ void MuonAnalysis::loadFittings() {
           SLOT(dataToFitChanged()));
   connect(m_uiForm.plotCreation, SIGNAL(currentIndexChanged(int)), this,
           SLOT(updateDataPresenterOverwrite(int)));
+  connect(m_uiForm.fitBrowser, SIGNAL(groupBoxClicked()), this,
+          SLOT(handleGroupBox()));
+  connect(m_uiForm.fitBrowser, SIGNAL(periodBoxClicked()), this,
+          SLOT(handlePeriodBox()));
+
   m_fitDataPresenter->setOverwrite(isOverwriteEnabled());
   // Set multi fit mode on/off as appropriate
   const auto &multiFitState = m_optionTab->getMultiFitState();
@@ -2134,7 +2152,43 @@ void MuonAnalysis::loadFittings() {
   const auto &TFAsymmState = m_optionTab->getTFAsymmState();
   setTFAsymm(TFAsymmState);
 }
-
+/**
+* Handle "groups" selected/deselected
+* Update stored value
+*/
+void MuonAnalysis::handleGroupBox() {
+  // send the chosen groups to the data selector
+  m_dataSelector->setGroupsSelected(m_uiForm.fitBrowser->getChosenGroups());
+  // update labels for single fit
+  auto names = m_fitDataPresenter->generateWorkspaceNames(true);
+  if (names.size() == 1) {
+    updateLabels(names[0]);
+  }
+  m_fitDataPresenter->handleSelectedDataChanged(true);
+}
+/**
+* Handle"periods" selected/deselected
+* Update stored value
+*/
+void MuonAnalysis::handlePeriodBox() {
+  // send the chosen periods to the data selector
+  m_dataSelector->setPeriodsSelected(m_uiForm.fitBrowser->getChosenPeriods());
+  // update labels for single fit
+  auto names = m_fitDataPresenter->generateWorkspaceNames(true);
+  if (names.size() == 1) {
+    updateLabels(names[0]);
+  }
+  m_fitDataPresenter->handleSelectedDataChanged(true);
+}
+/**
+* Updates the labels (legend and ws) for
+* a single fit when within the multi-
+* fit GUI.
+* @param name :: the name for the label.
+*/
+void MuonAnalysis::updateLabels(std::string &name) {
+  m_uiForm.fitBrowser->setOutputName(name);
+}
 /**
  * Allow/disallow loading.
  */
@@ -2424,8 +2478,11 @@ void MuonAnalysis::changeTab(int newTabIndex) {
   if (newTab == m_uiForm.DataAnalysis) // Entering DA tab
   {
     // Save last fitting range
-    auto xmin = m_dataSelector->getStartTime();
-    auto xmax = m_dataSelector->getEndTime();
+    auto xmin = m_uiForm.fitBrowser->startX();
+    auto xmax = m_uiForm.fitBrowser->endX();
+    // make sure data selector has same values
+    m_dataSelector->setStartTime(xmin);
+    m_dataSelector->setEndTime(xmax);
 
     // Say MantidPlot to use Muon Analysis fit prop. browser
     emit setFitPropertyBrowser(m_uiForm.fitBrowser);
@@ -2456,6 +2513,13 @@ void MuonAnalysis::changeTab(int newTabIndex) {
     // repeat setting the fitting ranges as the above code can set them to an
     // unwanted default value
     setFittingRanges(xmin, xmax);
+    // make sure group selection is not used in single-fit mode
+    if (m_optionTab->getMultiFitState() == Muon::MultiFitState::Disabled) {
+      m_uiForm.fitBrowser->setSingleFitLabel(m_currentDataName.toStdString());
+    } else {
+      m_uiForm.fitBrowser->setAllGroups();
+      m_uiForm.fitBrowser->setChosenPeriods("1");
+    }
   } else if (newTab == m_uiForm.ResultsTable) {
     m_resultTableTab->refresh();
   }
@@ -3064,9 +3128,6 @@ void MuonAnalysis::multiFitCheckboxChanged(int state) {
     setTFAsymm(Muon::TFAsymmState::Disabled);
   }
   m_fitFunctionPresenter->setMultiFitState(multiFitState);
-  if (multiFitState == Muon::MultiFitState::Disabled) {
-    m_dataSelector->clearChosenGroups();
-  }
 }
 /**
 * Called when the "TF Asymmetry" checkbox is changed (settings tab.)
diff --git a/MantidQt/CustomInterfaces/src/Muon/MuonAnalysisFitDataPresenter.cpp b/MantidQt/CustomInterfaces/src/Muon/MuonAnalysisFitDataPresenter.cpp
index 13f3ee185166939d094a36ea6284ebf7cd7e0486..69f5de4123504dcdc93b449cde2106f5b2649d14 100644
--- a/MantidQt/CustomInterfaces/src/Muon/MuonAnalysisFitDataPresenter.cpp
+++ b/MantidQt/CustomInterfaces/src/Muon/MuonAnalysisFitDataPresenter.cpp
@@ -401,7 +401,8 @@ MuonAnalysisFitDataPresenter::createWorkspace(const std::string &name,
     if (params.periods.empty()) {
       analysisOptions.summedPeriods = "1";
     } else {
-      std::replace(params.periods.begin(), params.periods.end(), ',', '+');
+      // need a comma-separated list
+      std::replace(params.periods.begin(), params.periods.end(), '+', ',');
       const size_t minus = params.periods.find('-');
       analysisOptions.summedPeriods = params.periods.substr(0, minus);
       if (minus != std::string::npos && minus != params.periods.size()) {
@@ -794,10 +795,10 @@ void MuonAnalysisFitDataPresenter::setUpDataSelector(
   const auto &groups = m_dataSelector->getChosenGroups();
   const auto &periods = m_dataSelector->getPeriodSelections();
   if (!groups.contains(groupToSet)) {
-    m_dataSelector->setChosenGroup(groupToSet);
+    emit setChosenGroupSignal(groupToSet);
   }
   if (!periodToSet.isEmpty() && !periods.contains(periodToSet)) {
-    m_dataSelector->setChosenPeriod(periodToSet);
+    emit setChosenPeriodSignal(periodToSet);
   }
 
   // If given an optional file path to "current run", cache it for later use
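
The period handling above converts the '+'-joined period list taken from the workspace name into the comma-separated form expected for summed periods, with an optional '-' separating the periods to subtract. A hedged sketch of that convention (hypothetical helper, inferred from this hunk only):

    #include <algorithm>
    #include <string>
    #include <utility>

    // Illustrative split of a period string such as "1+2-3" into summed ("1,2")
    // and subtracted ("3") lists, following the replace-and-substr logic above.
    std::pair<std::string, std::string> splitPeriods(std::string periods) {
      if (periods.empty())
        return {"1", ""}; // default to the first period, as the patch does
      std::replace(periods.begin(), periods.end(), '+', ','); // comma-separated
      const size_t minus = periods.find('-');
      const std::string summed = periods.substr(0, minus);
      const std::string subtracted =
          (minus != std::string::npos) ? periods.substr(minus + 1) : "";
      return {summed, subtracted};
    }

With "1+2-3" this gives summed periods "1,2" and a subtracted period "3"; an empty input falls back to period "1".
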
diff --git a/MantidQt/CustomInterfaces/src/Reflectometry/QtReflEventView.cpp b/MantidQt/CustomInterfaces/src/Reflectometry/QtReflEventView.cpp
index 00eb1af537f0195f866b83881bb74602fc737bfe..35b6542845f1bc4cace413c79af6b1d375a3d368 100644
--- a/MantidQt/CustomInterfaces/src/Reflectometry/QtReflEventView.cpp
+++ b/MantidQt/CustomInterfaces/src/Reflectometry/QtReflEventView.cpp
@@ -13,10 +13,17 @@ QtReflEventView::QtReflEventView(QWidget *parent) {
   UNUSED_ARG(parent);
   initLayout();
 
+  // Insert slice-type to string pairs
+  m_sliceTypeMap[SliceType::UniformEven] = "UniformEven";
+  m_sliceTypeMap[SliceType::Uniform] = "Uniform";
+  m_sliceTypeMap[SliceType::Custom] = "Custom";
+  m_sliceTypeMap[SliceType::LogValue] = "LogValue";
+
   // Add slicing option buttons to list
   m_buttonList.push_back(m_ui.uniformEvenButton);
   m_buttonList.push_back(m_ui.uniformButton);
   m_buttonList.push_back(m_ui.customButton);
+  m_buttonList.push_back(m_ui.logValueButton);
 
   // Whenever one of the slicing option buttons is selected, their corresponding
   // entry is enabled, otherwise they remain disabled.
@@ -54,20 +61,34 @@ std::string QtReflEventView::getTimeSlicingValues() const {
 
   std::string values;
 
-  if (m_sliceType == "UniformEven")
+  switch (m_sliceType) {
+  case SliceType::UniformEven:
     values = m_ui.uniformEvenEdit->text().toStdString();
-  else if (m_sliceType == "Uniform")
+    break;
+  case SliceType::Uniform:
     values = m_ui.uniformEdit->text().toStdString();
-  else if (m_sliceType == "Custom")
+    break;
+  case SliceType::Custom:
     values = m_ui.customEdit->text().toStdString();
+    break;
+  case SliceType::LogValue:
+    std::string slicingValues = m_ui.logValueEdit->text().toStdString();
+    std::string logFilter = m_ui.logValueTypeEdit->text().toStdString();
+    if (!slicingValues.empty() && !logFilter.empty())
+      values = "Slicing=\"" + slicingValues + "\",LogFilter=" + logFilter;
+    break;
+  }
 
   return values;
 }
 
-/** Returns the type of time slicing that was selected
+/** Returns the type of time slicing that was selected, as a string
 * @return :: Time slicing type
 */
-std::string QtReflEventView::getTimeSlicingType() const { return m_sliceType; }
+std::string QtReflEventView::getTimeSlicingType() const {
+
+  return m_sliceTypeMap.at(m_sliceType);
+}
 
 /** Enable slicing option entries for checked button and disable all others.
 */
@@ -75,8 +96,8 @@ void QtReflEventView::toggleSlicingOptions() const {
 
   const auto checkedButton = m_ui.slicingOptionsButtonGroup->checkedButton();
 
-  const std::vector<std::string> slicingTypes = {"UniformEven", "Uniform",
-                                                 "Custom"};
+  SliceType slicingTypes[4] = {SliceType::UniformEven, SliceType::Uniform,
+                               SliceType::Custom, SliceType::LogValue};
 
   std::vector<bool> entriesEnabled(m_buttonList.size(), false);
   for (size_t i = 0; i < m_buttonList.size(); i++) {
@@ -87,9 +108,20 @@ void QtReflEventView::toggleSlicingOptions() const {
     }
   }
 
+  // UniformEven
   m_ui.uniformEvenEdit->setEnabled(entriesEnabled[0]);
+  m_ui.uniformEvenLabel->setEnabled(entriesEnabled[0]);
+  // Uniform
   m_ui.uniformEdit->setEnabled(entriesEnabled[1]);
+  m_ui.uniformLabel->setEnabled(entriesEnabled[1]);
+  // Custom
   m_ui.customEdit->setEnabled(entriesEnabled[2]);
+  m_ui.customLabel->setEnabled(entriesEnabled[2]);
+  // LogValue
+  m_ui.logValueEdit->setEnabled(entriesEnabled[3]);
+  m_ui.logValueLabel->setEnabled(entriesEnabled[3]);
+  m_ui.logValueTypeEdit->setEnabled(entriesEnabled[3]);
+  m_ui.logValueTypeLabel->setEnabled(entriesEnabled[3]);
 }
 
 } // namespace CustomInterfaces
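
getTimeSlicingType now maps the SliceType enum to its string form through m_sliceTypeMap. A standalone sketch of that lookup (illustrative only); note that std::map::at throws std::out_of_range for an unregistered enumerator, so every SliceType value must be inserted when the view is constructed:

    #include <map>
    #include <string>

    // Illustrative enum-to-string lookup mirroring m_sliceTypeMap.
    enum class SliceType { UniformEven, Uniform, Custom, LogValue };

    std::string toString(SliceType type) {
      static const std::map<SliceType, std::string> names = {
          {SliceType::UniformEven, "UniformEven"},
          {SliceType::Uniform, "Uniform"},
          {SliceType::Custom, "Custom"},
          {SliceType::LogValue, "LogValue"}};
      return names.at(type); // throws std::out_of_range for unregistered values
    }
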
diff --git a/MantidQt/CustomInterfaces/src/Reflectometry/ReflDataProcessorPresenter.cpp b/MantidQt/CustomInterfaces/src/Reflectometry/ReflDataProcessorPresenter.cpp
index 93f15734fcd07c050ad853576e12a3da00a96df0..ca6b309327e89e630eaaaf66ffc152750fb69a77 100644
--- a/MantidQt/CustomInterfaces/src/Reflectometry/ReflDataProcessorPresenter.cpp
+++ b/MantidQt/CustomInterfaces/src/Reflectometry/ReflDataProcessorPresenter.cpp
@@ -5,6 +5,7 @@
 #include "MantidAPI/Run.h"
 #include "MantidQtMantidWidgets/DataProcessorUI/DataProcessorTreeManager.h"
 #include "MantidQtMantidWidgets/DataProcessorUI/DataProcessorView.h"
+#include "MantidQtMantidWidgets/DataProcessorUI/ParseKeyValueString.h"
 #include "MantidQtMantidWidgets/ProgressPresenter.h"
 
 using namespace MantidQt::MantidWidgets;
@@ -183,10 +184,13 @@ bool ReflDataProcessorPresenter::processGroupAsEventWS(
   size_t numGroupSlices = INT_MAX;
 
   std::vector<double> startTimes, stopTimes;
+  std::string logFilter; // Set if we are slicing by log value
 
-  // For custom slicing, the start/stop times are the same for all rows
+  // For custom/log-value slicing, the start/stop times are the same for all rows
   if (timeSlicingType == "Custom")
     parseCustom(timeSlicingValues, startTimes, stopTimes);
+  if (timeSlicingType == "LogValue")
+    parseLogValue(timeSlicingValues, logFilter, startTimes, stopTimes);
 
   for (const auto &row : group) {
 
@@ -194,7 +198,7 @@ bool ReflDataProcessorPresenter::processGroupAsEventWS(
     const auto data = row.second;         // Vector containing data for this row
     std::string runNo = row.second.at(0); // The run number
 
-    if (timeSlicingType != "Custom") {
+    if (timeSlicingType == "UniformEven" || timeSlicingType == "Uniform") {
       const std::string runName = "TOF_" + runNo;
       parseUniform(timeSlicingValues, timeSlicingType, runName, startTimes,
                    stopTimes);
@@ -205,8 +209,9 @@ bool ReflDataProcessorPresenter::processGroupAsEventWS(
 
     for (size_t i = 0; i < numSlices; i++) {
       try {
-        auto wsName = takeSlice(runNo, i, startTimes[i], stopTimes[i]);
         std::vector<std::string> slice(data);
+        std::string wsName =
+            takeSlice(runNo, i, startTimes[i], stopTimes[i], logFilter);
         slice[0] = wsName;
         auto newData = reduceRow(slice);
         newData[0] = data[0];
@@ -225,8 +230,8 @@ bool ReflDataProcessorPresenter::processGroupAsEventWS(
   // Post-process (if needed)
   if (multiRow) {
 
-    // All slices are common for uniform even or custom slicing
-    if (timeSlicingType == "UniformEven" || timeSlicingType == "Custom")
+    // All slices are common for uniform even, custom and log value slicing
+    if (timeSlicingType != "Uniform")
       numGroupSlices = startTimes.size();
 
     addNumGroupSlicesEntry(groupID, numGroupSlices);
@@ -351,28 +356,47 @@ void ReflDataProcessorPresenter::parseCustom(const std::string &timeSlicing,
   std::transform(timesStr.begin(), timesStr.end(), std::back_inserter(times),
                  [](const std::string &astr) { return std::stod(astr); });
 
-  size_t numTimes = times.size();
+  size_t numSlices = times.size() > 1 ? times.size() - 1 : 1;
 
   // Add the start/stop times
-  startTimes = std::vector<double>(numTimes - 1);
-  stopTimes = std::vector<double>(numTimes - 1);
+  startTimes = std::vector<double>(numSlices);
+  stopTimes = std::vector<double>(numSlices);
 
-  if (numTimes == 1) {
+  if (times.size() == 1) {
     startTimes[0] = 0;
     stopTimes[0] = times[0];
   } else {
-    for (size_t i = 0; i < numTimes - 1; i++) {
+    for (size_t i = 0; i < numSlices; i++) {
       startTimes[i] = times[i];
       stopTimes[i] = times[i + 1];
     }
   }
 }
 
-/** Loads an event workspace and puts it into the ADS
+/** Parses a string to extract log value filter and time slicing
  *
- * @param runNo :: the run number as a string
- * @return :: True if algorithm was executed. False otherwise
+ * @param inputStr :: The string to parse
+ * @param logFilter :: The log filter to use
+ * @param startTimes :: Start times for the set of slices
+ * @param stopTimes :: Stop times for the set of slices
  */
+void ReflDataProcessorPresenter::parseLogValue(const std::string &inputStr,
+                                               std::string &logFilter,
+                                               std::vector<double> &startTimes,
+                                               std::vector<double> &stopTimes) {
+
+  auto strMap = parseKeyValueString(inputStr);
+  std::string timeSlicing = strMap.at("Slicing");
+  logFilter = strMap.at("LogFilter");
+
+  parseCustom(timeSlicing, startTimes, stopTimes);
+}
+
+/** Loads an event workspace and puts it into the ADS
+*
+* @param runNo :: The run number as a string
+* @return :: True if algorithm was executed. False otherwise
+*/
 bool ReflDataProcessorPresenter::loadEventRun(const std::string &runNo) {
 
   bool runFound;
@@ -394,9 +418,9 @@ bool ReflDataProcessorPresenter::loadEventRun(const std::string &runNo) {
 }
 
 /** Loads a non-event workspace and puts it into the ADS
- *
- * @param runNo :: the run number as a string
- */
+*
+* @param runNo :: The run number as a string
+*/
 void ReflDataProcessorPresenter::loadNonEventRun(const std::string &runNo) {
 
   bool runFound; // unused but required
@@ -445,42 +469,52 @@ std::string ReflDataProcessorPresenter::loadRun(const std::string &run,
 
 /** Takes a slice from a run and puts the 'sliced' workspace into the ADS
 *
-* @param runNo :: the run number as a string
-* @param sliceIndex :: the index of the slice being taken
-* @param startTime :: start time
-* @param stopTime :: stop time
+* @param runNo :: The run number as a string
+* @param sliceIndex :: The index of the slice being taken
+* @param startTime :: Start time
+* @param stopTime :: Stop time
+* @param logFilter :: The log filter to use if slicing by log value
 * @return :: the name of the sliced workspace (without prefix 'TOF_')
 */
-std::string ReflDataProcessorPresenter::takeSlice(const std::string &runNo,
-                                                  size_t sliceIndex,
-                                                  double startTime,
-                                                  double stopTime) {
+std::string ReflDataProcessorPresenter::takeSlice(
+    const std::string &runNo, size_t sliceIndex, double startTime,
+    double stopTime, const std::string &logFilter) {
 
   std::string runName = "TOF_" + runNo;
   std::string sliceName = runName + "_slice_" + std::to_string(sliceIndex);
   std::string monName = runName + "_monitors";
+  std::string filterAlg =
+      logFilter.empty() ? "FilterByTime" : "FilterByLogValue";
 
-  // Filter by time
-  IAlgorithm_sptr filter = AlgorithmManager::Instance().create("FilterByTime");
+  // Filter the run using the appropriate filter algorithm
+  IAlgorithm_sptr filter = AlgorithmManager::Instance().create(filterAlg);
   filter->initialize();
   filter->setProperty("InputWorkspace", runName);
   filter->setProperty("OutputWorkspace", sliceName);
-  filter->setProperty("StartTime", startTime);
-  filter->setProperty("StopTime", stopTime);
+  if (filterAlg == "FilterByTime") {
+    filter->setProperty("StartTime", startTime);
+    filter->setProperty("StopTime", stopTime);
+  } else { // FilterByLogValue
+    filter->setProperty("MinimumValue", startTime);
+    filter->setProperty("MaximumValue", stopTime);
+    filter->setProperty("TimeTolerance", 1.0);
+    filter->setProperty("LogName", logFilter);
+  }
+
   filter->execute();
 
-  // Get the normalization constant for this slice
-  MatrixWorkspace_sptr mws =
-      AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>(runName);
+  // Obtain the normalization constant for this slice
+  IEventWorkspace_sptr mws =
+      AnalysisDataService::Instance().retrieveWS<IEventWorkspace>(runName);
   double total = mws->run().getProtonCharge();
-  mws = AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>(sliceName);
+  mws = AnalysisDataService::Instance().retrieveWS<IEventWorkspace>(sliceName);
   double slice = mws->run().getProtonCharge();
-  double fraction = slice / total;
+  double scaleFactor = slice / total;
 
   IAlgorithm_sptr scale = AlgorithmManager::Instance().create("Scale");
   scale->initialize();
   scale->setProperty("InputWorkspace", monName);
-  scale->setProperty("Factor", fraction);
+  scale->setProperty("Factor", scaleFactor);
   scale->setProperty("OutputWorkspace", "__" + monName + "_temp");
   scale->execute();
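
takeSlice now picks FilterByTime or FilterByLogValue depending on whether a log filter was supplied, and each slice's monitors are rescaled by the slice's share of the run's proton charge. A standalone sketch of the two supporting calculations, consecutive start/stop pairs from the slice boundaries and the monitor scale factor (illustrative names, no Mantid API calls):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Turn slice boundaries, e.g. {0, 10, 20, 30}, into start/stop pairs
    // {0,10}, {10,20}, {20,30}; a single value t gives the single slice {0,t}.
    std::vector<std::pair<double, double>>
    sliceBounds(const std::vector<double> &times) {
      std::vector<std::pair<double, double>> slices;
      if (times.empty())
        return slices;
      if (times.size() == 1) {
        slices.emplace_back(0.0, times[0]);
      } else {
        for (std::size_t i = 0; i + 1 < times.size(); ++i)
          slices.emplace_back(times[i], times[i + 1]);
      }
      return slices;
    }

    // Normalisation applied to a slice's monitors: the slice's proton charge as
    // a fraction of the full run's proton charge (the Scale "Factor" above).
    double monitorScaleFactor(double sliceProtonCharge, double totalProtonCharge) {
      return sliceProtonCharge / totalProtonCharge;
    }
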
 
diff --git a/MantidQt/CustomInterfaces/src/SANSRunWindow.cpp b/MantidQt/CustomInterfaces/src/SANSRunWindow.cpp
index c304f7ca2fca20570033be5ecb1b15a578b5c1ce..e7935cf7cf3fc55326acde21cb0a5f0895b06fef 100644
--- a/MantidQt/CustomInterfaces/src/SANSRunWindow.cpp
+++ b/MantidQt/CustomInterfaces/src/SANSRunWindow.cpp
@@ -2483,6 +2483,9 @@ void SANSRunWindow::handleReduceButtonClick(const QString &typeStr) {
       return;
     }
 
+    // Update the IDF file path for batch reductions
+    updateIDFFilePathForBatch();
+
     // check for the detectors combination option
     // transform the SANS Diagnostic gui option in: 'rear', 'front' , 'both',
     // 'merged', None WavRangeReduction option
@@ -5109,19 +5112,32 @@ bool SANSRunWindow::isValidUserFile() {
   return true;
 }
 
-void SANSRunWindow::updateIDFFilePath() {
-  QString getIdf = "i.get_current_idf_path_in_reducer()\n";
-  QString resultIdf(runPythonCode(getIdf, false));
-  auto teset1 = resultIdf.toStdString();
+void SANSRunWindow::updateIDFInfo(const QString &command) {
+  QString resultIdf(runPythonCode(command, false));
   resultIdf = resultIdf.simplified();
-  auto test2 = resultIdf.toStdString();
   if (resultIdf != m_constants.getPythonEmptyKeyword() &&
       !resultIdf.isEmpty()) {
-    auto test = resultIdf.toStdString();
     m_uiForm.current_idf_path->setText(resultIdf);
   }
 }
 
+void SANSRunWindow::updateIDFFilePathForBatch() {
+
+  if (m_uiForm.batch_table->rowCount() == 0) {
+    return;
+  }
+  // We base the IDF entry on the sample scatter entry of the first row
+  auto *table_item = m_uiForm.batch_table->item(0, 0);
+  auto scatter_sample_run = table_item->text();
+  QString getIdf = "i.get_idf_path_for_run(\"" + scatter_sample_run + "\")\n";
+  updateIDFInfo(getIdf);
+}
+
+void SANSRunWindow::updateIDFFilePath() {
+  QString getIdf = "i.get_current_idf_path_in_reducer()\n";
+  updateIDFInfo(getIdf);
+}
+
 void SANSRunWindow::onUpdateGeometryRequest() {
   auto sampleWidth = m_uiForm.sample_width->text();
   auto sampleHeight = m_uiForm.sample_height->text();
diff --git a/MantidQt/CustomInterfaces/test/MuonAnalysisFitDataPresenterTest.h b/MantidQt/CustomInterfaces/test/MuonAnalysisFitDataPresenterTest.h
index 316d13bc97414722e1e90f453e58ffbb00da8035..589e1c45ffde3e114f02db75276a645af3e407af 100644
--- a/MantidQt/CustomInterfaces/test/MuonAnalysisFitDataPresenterTest.h
+++ b/MantidQt/CustomInterfaces/test/MuonAnalysisFitDataPresenterTest.h
@@ -48,22 +48,20 @@ operator<<(std::basic_ostream<CharType, CharTrait> &out,
   return out;
 }
 }
-
 /// Mock data selector widget
 class MockDataSelector : public IMuonFitDataSelector {
 public:
   GCC_DIAG_OFF_SUGGEST_OVERRIDE
+
   MOCK_CONST_METHOD0(getFilenames, QStringList());
   MOCK_CONST_METHOD0(getStartTime, double());
   MOCK_CONST_METHOD0(getEndTime, double());
-  MOCK_METHOD1(setNumPeriods, void(size_t));
-  MOCK_METHOD1(setChosenPeriod, void(const QString &));
+  MOCK_METHOD1(setPeriodsSelected, void(const QStringList &));
   MOCK_CONST_METHOD0(getPeriodSelections, QStringList());
   MOCK_METHOD3(setWorkspaceDetails, void(const QString &, const QString &,
                                          const boost::optional<QString> &));
-  MOCK_METHOD1(setAvailableGroups, void(const QStringList &));
   MOCK_CONST_METHOD0(getChosenGroups, QStringList());
-  MOCK_METHOD1(setChosenGroup, void(const QString &));
+  MOCK_METHOD1(setGroupsSelected, void(const QStringList &));
   MOCK_METHOD1(setStartTime, void(double));
   MOCK_METHOD1(setEndTime, void(double));
   MOCK_METHOD1(setStartTimeQuietly, void(double));
@@ -95,6 +93,9 @@ public:
   MOCK_METHOD1(userChangedDataset, void(int));
   MOCK_CONST_METHOD0(rawData, bool());
   MOCK_METHOD1(continueAfterChecks, void(bool));
+  MOCK_METHOD1(setNumPeriods, void(size_t));
+  MOCK_METHOD1(setAvailableGroups, void(const QStringList &));
+  MOCK_METHOD1(setChosenGroup, void(const QString &));
   void preFitChecksRequested(bool sequential) override {
     UNUSED_ARG(sequential);
   }
@@ -205,9 +206,11 @@ public:
     EXPECT_CALL(*m_dataSelector,
                 setWorkspaceDetails(QString("00015189-91"), QString("MUSR"),
                                     Eq(boost::optional<QString>{}))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(QString("long"))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(QString("1"))).Times(1);
-    m_presenter->setAssignedFirstRun(wsName, boost::none);
+    EXPECT_CALL(*m_dataSelector, setPeriodsSelected(QStringList({"1"})))
+        .Times(1);
+    EXPECT_CALL(*m_dataSelector, setGroupsSelected(QStringList({"long"})))
+        .Times(1);
+    localSetAssignedFirstRun(wsName, boost::none);
   }
 
   void test_setAssignedFirstRun_nonContiguousRange() {
@@ -217,9 +220,12 @@ public:
                 setWorkspaceDetails(QString("00015189-91, 15193"),
                                     QString("MUSR"),
                                     Eq(boost::optional<QString>{}))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(QString("long"))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(QString("1"))).Times(1);
-    m_presenter->setAssignedFirstRun(wsName, boost::none);
+    EXPECT_CALL(*m_dataSelector, setGroupsSelected(QStringList({"long"})))
+        .Times(1);
+    EXPECT_CALL(*m_dataSelector, setPeriodsSelected(QStringList({"1"})))
+        .Times(1);
+    // m_presenter->setAssignedFirstRun(wsName, boost::none);
+    localSetAssignedFirstRun(wsName, boost::none);
   }
 
   void test_setAssignedFirstRun_alreadySet() {
@@ -228,9 +234,12 @@ public:
     m_presenter->setAssignedFirstRun(wsName, boost::none);
     EXPECT_CALL(*m_dataSelector, setWorkspaceDetails(_, _, _)).Times(0);
     EXPECT_CALL(*m_fitBrowser, allowSequentialFits(_)).Times(0);
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(QString("long"))).Times(0);
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(QString("1"))).Times(0);
-    m_presenter->setAssignedFirstRun(wsName, boost::none);
+    EXPECT_CALL(*m_dataSelector, setGroupsSelected(QStringList({"long"})))
+        .Times(1);
+    EXPECT_CALL(*m_dataSelector, setPeriodsSelected(QStringList({"1"})))
+        .Times(1);
+    // m_presenter->setAssignedFirstRun(wsName, boost::none);
+    localSetAssignedFirstRun(wsName, boost::none);
   }
 
   void test_setAssignedFirstRun_loadCurrentRun() {
@@ -241,13 +250,15 @@ public:
     EXPECT_CALL(*m_dataSelector,
                 setWorkspaceDetails(QString("00061335"), QString("MUSR"),
                                     Eq(currentRunPath))).Times(1);
-    m_presenter->setAssignedFirstRun(wsName, currentRunPath);
+    // m_presenter->setAssignedFirstRun(wsName, currentRunPath);
+    localSetAssignedFirstRun(wsName, currentRunPath);
   }
 
   void test_getAssignedFirstRun() {
     setupGroupPeriodSelections();
     const QString wsName("MUSR00015189; Pair; long; Asym; 1; #1");
-    m_presenter->setAssignedFirstRun(wsName, boost::none);
+    // m_presenter->setAssignedFirstRun(wsName, boost::none);
+    localSetAssignedFirstRun(wsName, boost::none);
     TS_ASSERT_EQUALS(wsName, m_presenter->getAssignedFirstRun());
   }
 
@@ -683,42 +694,13 @@ public:
     EXPECT_CALL(*m_dataSelector,
                 setWorkspaceDetails(QString("00015189-91"), QString("MUSR"),
                                     Eq(boost::none))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(QString("fwd"))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(QString("1"))).Times(1);
-
-    m_presenter->setSelectedWorkspace(wsName, boost::none);
-  }
-
-  void test_setSelectedWorkspace_groupsAlreadySelected_shouldNotUnselect() {
-    const QString wsName("MUSR00015189-91; Group; fwd; Asym; 1; #6");
-
-    // Groups "fwd" and "bwd" are already selected
-    ON_CALL(*m_dataSelector, getChosenGroups())
-        .WillByDefault(Return(QStringList{"fwd", "bwd"}));
-    ON_CALL(*m_dataSelector, getPeriodSelections())
-        .WillByDefault(Return(QStringList{}));
-
-    // It should NOT deselect the already selected groups
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(_)).Times(0);
-
-    m_presenter->setSelectedWorkspace(wsName, boost::none);
-  }
-
-  void test_setSelectedWorkspace_periodsAlreadySelected_shouldNotUnselect() {
-    const QString wsName("MUSR00015189-91; Group; fwd; Asym; 1; #6");
-
-    // Periods 1 and 2 are already selected
-    ON_CALL(*m_dataSelector, getPeriodSelections())
-        .WillByDefault(Return(QStringList{"1", "2"}));
-    ON_CALL(*m_dataSelector, getChosenGroups())
-        .WillByDefault(Return(QStringList{}));
-
-    // It should NOT deselect the already selected periods
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(_)).Times(0);
+    EXPECT_CALL(*m_dataSelector, setGroupsSelected(QStringList({"fwd"})))
+        .Times(1);
+    EXPECT_CALL(*m_dataSelector, setPeriodsSelected(QStringList({"1"})))
+        .Times(1);
 
-    m_presenter->setSelectedWorkspace(wsName, boost::none);
+    localSetSelectedWorkspace(wsName, boost::none);
   }
-
   void test_setSelectedWorkspace_loadCurrentRun() {
     setupGroupPeriodSelections();
     const QString wsName("MUSR00061335; Group; fwd; Asym; 1; #1");
@@ -735,10 +717,12 @@ public:
     EXPECT_CALL(*m_dataSelector,
                 setWorkspaceDetails(QString("00061335"), QString("MUSR"),
                                     Eq(currentRunPath))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(QString("fwd"))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(QString("1"))).Times(1);
+    EXPECT_CALL(*m_dataSelector, setGroupsSelected(QStringList({"fwd"})))
+        .Times(1);
+    EXPECT_CALL(*m_dataSelector, setPeriodsSelected(QStringList({"1"})))
+        .Times(1);
 
-    m_presenter->setSelectedWorkspace(wsName, currentRunPath);
+    localSetSelectedWorkspace(wsName, currentRunPath);
   }
 
   void test_doPreFitChecks_nonSequential_invalidRuns_doesNotFit() {
@@ -1081,6 +1065,36 @@ private:
     m_presenter->doPreFitChecks(sequential);
   }
 
+  /// method to manually set up the workspace selection;
+  /// this is a workaround for the signal/slot connections
+  void localSetAssignedFirstRun(const QString &wsName,
+                                const boost::optional<QString> &filepath) {
+    m_presenter->setAssignedFirstRun(wsName, filepath);
+    // manually replicate signal
+    const auto wsParams =
+        MantidQt::CustomInterfaces::MuonAnalysisHelper::parseWorkspaceName(
+            wsName.toStdString());
+    m_dataSelector->setPeriodsSelected(
+        QStringList{QString::fromStdString(wsParams.periods)});
+    m_dataSelector->setGroupsSelected(
+        QStringList{QString::fromStdString(wsParams.itemName)});
+  }
+
+  /// method to manually set up the workspace selection;
+  /// this is a workaround for the signal/slot connections
+  void localSetSelectedWorkspace(const QString &wsName,
+                                 const boost::optional<QString> &filepath) {
+    m_presenter->setSelectedWorkspace(wsName, filepath);
+    // manually replicate signal
+    const auto wsParams =
+        MantidQt::CustomInterfaces::MuonAnalysisHelper::parseWorkspaceName(
+            wsName.toStdString());
+    m_dataSelector->setPeriodsSelected(
+        QStringList{QString::fromStdString(wsParams.periods)});
+    m_dataSelector->setGroupsSelected(
+        QStringList{QString::fromStdString(wsParams.itemName)});
+  }
+
   MockDataSelector *m_dataSelector;
   MockFitBrowser *m_fitBrowser;
   MuonAnalysisFitDataPresenter *m_presenter;
diff --git a/MantidQt/CustomInterfaces/test/ReflDataProcessorPresenterTest.h b/MantidQt/CustomInterfaces/test/ReflDataProcessorPresenterTest.h
index 738733de5a755523a9ae9791457cf4279972d457..32b7d900156f1f530d61ff288c3609a057c978aa 100644
--- a/MantidQt/CustomInterfaces/test/ReflDataProcessorPresenterTest.h
+++ b/MantidQt/CustomInterfaces/test/ReflDataProcessorPresenterTest.h
@@ -432,6 +432,104 @@ public:
     TS_ASSERT(Mock::VerifyAndClearExpectations(&mockMainPresenter));
   }
 
+  void testProcessEventWorkspacesLogValueSlicing() {
+    NiceMock<MockDataProcessorView> mockDataProcessorView;
+    NiceMock<MockProgressableView> mockProgress;
+    NiceMock<MockMainPresenter> mockMainPresenter;
+    auto presenter = presenterFactory.create();
+    presenter->acceptViews(&mockDataProcessorView, &mockProgress);
+    presenter->accept(&mockMainPresenter);
+
+    createPrefilledWorkspace("TestWorkspace", presenter->getWhiteList());
+    EXPECT_CALL(mockDataProcessorView, getWorkspaceToOpen())
+        .Times(1)
+        .WillRepeatedly(Return("TestWorkspace"));
+    TS_ASSERT_THROWS_NOTHING(
+        presenter->notify(DataProcessorPresenter::OpenTableFlag));
+
+    std::set<int> groupList;
+    groupList.insert(0);
+
+    // We should not receive any errors
+    EXPECT_CALL(mockMainPresenter, giveUserCritical(_, _)).Times(0);
+
+    // The user hits the "process" button with the first group selected
+    EXPECT_CALL(mockDataProcessorView, getSelectedChildren())
+        .Times(1)
+        .WillRepeatedly(Return(std::map<int, std::set<int>>()));
+    EXPECT_CALL(mockDataProcessorView, getSelectedParents())
+        .Times(1)
+        .WillRepeatedly(Return(groupList));
+    EXPECT_CALL(mockMainPresenter, getTimeSlicingValues())
+        .Times(1)
+        .WillOnce(Return("Slicing=\"0,10,20,30\",LogFilter=proton_charge"));
+    EXPECT_CALL(mockMainPresenter, getTimeSlicingType())
+        .Times(1)
+        .WillOnce(Return("LogValue"));
+    EXPECT_CALL(mockMainPresenter, getPreprocessingValues())
+        .Times(6)
+        .WillRepeatedly(Return(std::map<std::string, std::string>()));
+    EXPECT_CALL(mockMainPresenter, getPreprocessingProperties())
+        .Times(6)
+        .WillRepeatedly(Return(std::map<std::string, std::set<std::string>>()));
+    EXPECT_CALL(mockMainPresenter, getPreprocessingOptions())
+        .Times(6)
+        .WillRepeatedly(Return(std::map<std::string, std::string>()));
+    EXPECT_CALL(mockMainPresenter, getProcessingOptions())
+        .Times(6)
+        .WillRepeatedly(Return(""));
+    EXPECT_CALL(mockMainPresenter, getPostprocessingOptions())
+        .Times(3)
+        .WillRepeatedly(Return(""));
+    EXPECT_CALL(mockDataProcessorView, getEnableNotebook())
+        .Times(1)
+        .WillOnce(Return(false));
+    EXPECT_CALL(mockDataProcessorView, getProcessInstrument())
+        .Times(14)
+        .WillRepeatedly(Return("INTER"));
+    EXPECT_CALL(mockDataProcessorView, requestNotebookPath()).Times(0);
+
+    TS_ASSERT_THROWS_NOTHING(
+        presenter->notify(DataProcessorPresenter::ProcessFlag));
+
+    // Check output workspaces were created as expected
+    for (size_t i = 0; i < 3; i++) {
+      std::string sliceIndex = std::to_string(i);
+
+      TS_ASSERT(AnalysisDataService::Instance().doesExist(
+          "IvsLam_13460_slice_" + sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist(
+          "IvsLam_13462_slice_" + sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist("IvsQ_13460_slice_" +
+                                                          sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist("IvsQ_13462_slice_" +
+                                                          sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist(
+          "IvsQ_13460_slice_" + sliceIndex + "_13462_slice_" + sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist(
+          "IvsQ_binned_13460_slice_" + sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist(
+          "IvsQ_binned_13462_slice_" + sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13460_slice_" +
+                                                          sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13462_slice_" +
+                                                          sliceIndex));
+    }
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13460"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13462"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13460_monitors"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13462_monitors"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TRANS_13463"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TRANS_13464"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TRANS_13463_13464"));
+
+    // Tidy up
+    AnalysisDataService::Instance().clear();
+
+    TS_ASSERT(Mock::VerifyAndClearExpectations(&mockDataProcessorView));
+    TS_ASSERT(Mock::VerifyAndClearExpectations(&mockMainPresenter));
+  }
+
   void testProcessWithNotebookWarn() {
     NiceMock<MockDataProcessorView> mockDataProcessorView;
     NiceMock<MockProgressableView> mockProgress;
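
The new log-value slicing test above asserts a fixed naming convention for the sliced output workspaces. As a rough illustration only (not part of the patch), the expected names for one run and one slice index follow this pattern; the helper name is hypothetical.

#include <string>
#include <vector>

// Illustration: build the per-slice workspace names the assertions check,
// i.e. "<prefix>_<run>_slice_<index>" for each output prefix.
std::vector<std::string> expectedSliceNames(const std::string &run,
                                            size_t sliceIndex) {
  const std::string suffix = run + "_slice_" + std::to_string(sliceIndex);
  return {"IvsLam_" + suffix, "IvsQ_" + suffix, "IvsQ_binned_" + suffix,
          "TOF_" + suffix};
}
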
diff --git a/MantidQt/MantidWidgets/CMakeLists.txt b/MantidQt/MantidWidgets/CMakeLists.txt
index 5a23fcbece2d7ac0b6ab691e1e8393d7be0be18c..6866b8db7ef2216f53a2c7cef9d9db8eebcf2145 100644
--- a/MantidQt/MantidWidgets/CMakeLists.txt
+++ b/MantidQt/MantidWidgets/CMakeLists.txt
@@ -87,7 +87,6 @@ set ( SRC_FILES
 	src/MWDiag.cpp
 	src/MWView.cpp
 	src/MantidHelpWindow.cpp
-	src/MantidSurfacePlotDialog.cpp
 	src/MantidTreeWidget.cpp
 	src/MantidTreeWidgetItem.cpp
 	src/MantidWSIndexDialog.cpp
@@ -154,7 +153,6 @@ set ( MOC_FILES
     inc/MantidQtMantidWidgets/InstrumentSelector.h
     inc/MantidQtMantidWidgets/IndirectInstrumentConfig.h
     inc/MantidQtMantidWidgets/InputController.h
-    inc/MantidQtMantidWidgets/MantidSurfacePlotDialog.h
     inc/MantidQtMantidWidgets/MantidWSIndexDialog.h
     inc/MantidQtMantidWidgets/MantidTreeWidget.h
     inc/MantidQtMantidWidgets/MantidHelpWindow.h
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/FitPropertyBrowser.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/FitPropertyBrowser.h
index e1ccc74e0921631e140c623b4fac85f3201680f7..9ff471ef2bd0fba0a0d9a2d635f147317f95ff35 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/FitPropertyBrowser.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/FitPropertyBrowser.h
@@ -299,9 +299,9 @@ protected slots:
   /// Called when a bool property is changed
   virtual void boolChanged(QtProperty *prop);
 
+  virtual void enumChanged(QtProperty *prop);
 private slots:
 
-  void enumChanged(QtProperty *prop);
   void intChanged(QtProperty *prop);
   virtual void doubleChanged(QtProperty *prop);
   /// Called when one of the parameter values gets changed
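
Making enumChanged() a virtual protected slot lets subclasses intercept enum property changes; MuonFitPropertyBrowser below is the real user of this hook. A minimal sketch of an override, with a hypothetical derived class:

#include "MantidQtMantidWidgets/FitPropertyBrowser.h"

// Hypothetical subclass showing the intended use of the now-virtual slot.
class MyFitBrowser : public MantidQt::MantidWidgets::FitPropertyBrowser {
  Q_OBJECT
protected slots:
  void enumChanged(QtProperty *prop) override {
    // react to the changed enum property here, then delegate to the base
    FitPropertyBrowser::enumChanged(prop);
  }
};
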
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/IMuonFitDataSelector.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/IMuonFitDataSelector.h
index 783db6d7f65a96590bf3a0468cd6eddbb7d4da83..c199ae4521f115da701f13a4d5a46b6a9e07ecfe 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/IMuonFitDataSelector.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/IMuonFitDataSelector.h
@@ -21,15 +21,11 @@ public:
   virtual QStringList getFilenames() const = 0;
   virtual double getStartTime() const = 0;
   virtual double getEndTime() const = 0;
-  virtual void setNumPeriods(size_t numPeriods) = 0;
-  virtual void setChosenPeriod(const QString &period) = 0;
   virtual QStringList getPeriodSelections() const = 0;
   virtual void
   setWorkspaceDetails(const QString &runNumbers, const QString &instName,
                       const boost::optional<QString> &filePath) = 0;
-  virtual void setAvailableGroups(const QStringList &groupNames) = 0;
   virtual QStringList getChosenGroups() const = 0;
-  virtual void setChosenGroup(const QString &group) = 0;
   virtual void setStartTime(double start) = 0;
   virtual void setEndTime(double end) = 0;
   virtual void setStartTimeQuietly(double start) = 0;
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidDisplayBase.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidDisplayBase.h
index 17d8ea548092e36e549073bb22cd41255ebc1395..b05d2c195058da028cf040242a463ae6f3f53be3 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidDisplayBase.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidDisplayBase.h
@@ -23,7 +23,6 @@ class MantidMatrix;
 namespace MantidQt {
 namespace MantidWidgets {
 
-class MantidSurfacePlotDialog;
 class MantidWSIndexDialog;
 
 /**
@@ -103,17 +102,25 @@ public:
   plot1D(const QMultiMap<QString, std::set<int>> &toPlot, bool spectrumPlot,
          MantidQt::DistributionFlag distr = MantidQt::DistributionDefault,
          bool errs = false, MultiLayer *plotWindow = NULL,
-         bool clearWindow = false, bool waterfallPlot = false) = 0;
+         bool clearWindow = false, bool waterfallPlot = false,
+         const QString &log = "",
+         const std::set<double> &customLogValues = std::set<double>()) = 0;
   virtual void drawColorFillPlots(
       const QStringList &wsNames,
       GraphOptions::CurveType curveType = GraphOptions::ColorMap) = 0;
   virtual void showMDPlot() = 0;
-  virtual void showSurfacePlot() = 0;
-  virtual void showContourPlot() = 0;
   virtual MultiLayer *
   plotSubplots(const QMultiMap<QString, std::set<int>> &toPlot,
                MantidQt::DistributionFlag distr = MantidQt::DistributionDefault,
                bool errs = false, MultiLayer *plotWindow = nullptr) = 0;
+  virtual void plotSurface(bool accepted, int plotIndex,
+                           const QString &axisName, const QString &logName,
+                           const std::set<double> &customLogValues,
+                           const QList<QString> &workspaceNames) = 0;
+  virtual void plotContour(bool accepted, int plotIndex,
+                           const QString &axisName, const QString &logName,
+                           const std::set<double> &customLogValues,
+                           const QList<QString> &workspaceNames) = 0;
 
   // Interface Methods
   virtual void showVatesSimpleInterface() = 0;
@@ -123,12 +130,10 @@ public:
   virtual void showSampleMaterialWindow() = 0;
   virtual void showAlgorithmHistory() = 0;
 
-  virtual MantidSurfacePlotDialog *
-  createSurfacePlotDialog(int flags, QStringList wsNames,
-                          const QString &plotType) = 0;
   virtual MantidWSIndexDialog *
-  createWorkspaceIndexDialog(int flags, QStringList wsNames, bool showWaterfall,
-                             bool showPlotAll, bool showTiledOpt) = 0;
+  createWorkspaceIndexDialog(int flags, const QStringList &wsNames,
+                             bool showWaterfall, bool showPlotAll,
+                             bool showTiledOpt, bool isAdvanced = false) = 0;
 
   virtual void updateProject() = 0;
   virtual void showCritical(const QString &) {}
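
The createWorkspaceIndexDialog() signature gains an isAdvanced flag that switches the dialog into the surface/contour mode introduced later in this patch. A small caller sketch, where the flags value 0 and the helper name are placeholders:

#include "MantidQtMantidWidgets/MantidDisplayBase.h"
#include "MantidQtMantidWidgets/MantidWSIndexDialog.h"
#include <QStringList>

// Sketch: request the advanced variant of the workspace index dialog from
// any MantidDisplayBase implementation.
MantidQt::MantidWidgets::MantidWSIndexDialog *
makeAdvancedIndexDialog(MantidQt::MantidWidgets::MantidDisplayBase &display,
                        const QStringList &wsNames) {
  return display.createWorkspaceIndexDialog(/*flags=*/0, wsNames,
                                            /*showWaterfall=*/true,
                                            /*showPlotAll=*/true,
                                            /*showTiledOpt=*/true,
                                            /*isAdvanced=*/true);
}
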
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidSurfacePlotDialog.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidSurfacePlotDialog.h
deleted file mode 100644
index f89539d94d0c8ed89e40709fe686d540d3fc5923..0000000000000000000000000000000000000000
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidSurfacePlotDialog.h
+++ /dev/null
@@ -1,91 +0,0 @@
-#ifndef MANTIDQT_MANTIDWIDGETS_MANTIDSURFACEPLOTDIALOG_H_
-#define MANTIDQT_MANTIDWIDGETS_MANTIDSURFACEPLOTDIALOG_H_
-
-#include "MantidQtMantidWidgets/WidgetDllOption.h"
-#include "MantidWSIndexDialog.h"
-#include <QComboBox>
-
-namespace MantidQt {
-namespace MantidWidgets {
-class MantidDisplayBase;
-
-/**
- * The MantidSurfacePlotDialog offers the same functionality of choosing a
- * workspace index/spectrum No as MantidWSIndexDialog, but adds to it the
- * ability to choose a log value and the name for an axis.
- * This is for plotting a surface from a WorkspaceGroup.
- *
- * - The user may choose only one spectrum No, not a range.
- * - The user is offered the choice of only those logs that have single values
- * per workspace.
- */
-class EXPORT_OPT_MANTIDQT_MANTIDWIDGETS MantidSurfacePlotDialog
-    : public QDialog {
-  Q_OBJECT
-
-public:
-  /// Struct to hold user input
-  struct UserInputSurface {
-    bool accepted;
-    int plotIndex;
-    QString axisName;
-    QString logName;
-    std::set<double> customLogValues;
-  };
-
-  /// Constructor - same parameters as one of the parent constructors, along
-  /// with a list of the names of workspaces to be plotted.
-  MantidSurfacePlotDialog(MantidDisplayBase *parent, Qt::WFlags flags,
-                          QList<QString> wsNames, const QString &plotType);
-  /// Returns a structure holding all of the selected options
-  UserInputSurface getSelections() const;
-  /// Returns the workspace index to be plotted
-  int getPlot() const;
-  /// Display an error message box
-  static void showPlotOptionsError(const QString &message);
-  /// The string "Workspace index"
-  static const QString WORKSPACE_INDEX;
-  /// The string "Custom"
-  static const QString CUSTOM;
-
-private slots:
-  /// Called when the OK button is pressed.
-  void plot();
-  /// Called when the log selection is changed.
-  void onLogSelected(const QString &logName);
-
-private:
-  MantidWSIndexWidget m_widget;
-  /// Initializes the layout of the dialog
-  void init(const QString &plotType);
-  /// Initializes the layout of the log options
-  void initLogs();
-  /// Initializes the layout of the buttons
-  void initButtons();
-  /// Populate log combo box with log options
-  void populateLogComboBox();
-  /// Gets input name for log axis
-  const QString getAxisName() const;
-  /// Gets input name for log to use
-  const QString getLogName() const;
-  /// Returns the input custom log values
-  const std::set<double> getCustomLogValues() const;
-  /// A pointer to the parent MantidUI object
-  MantidDisplayBase *m_mantidUI;
-  /// A list of names of workspaces which are to be plotted.
-  QList<QString> m_wsNames;
-  /// Set to true when user accepts input
-  bool m_accepted;
-  /// Qt objects
-  QPushButton *m_okButton, *m_cancelButton;
-  QHBoxLayout *m_buttonBox;
-  QVBoxLayout *m_logBox, *m_outer;
-  QComboBox *m_logSelector;
-  QLineEdit *m_axisNameEdit, *m_logValues;
-  QLabel *m_logLabel, *m_axisLabel, *m_customLogLabel;
-  /// Minimum width for dialog to fit title in
-  static const int MINIMUM_WIDTH;
-};
-}
-}
-#endif // MANTIDQT_MANTIDWIDGETS_MANTIDSURFACEPLOTDIALOG_H_
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidTreeWidget.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidTreeWidget.h
index ff25d02926a82a8b11c31cd51fd61b8c3a1d7716..311ea7a8e27d66a96f7472e764346001350166d5 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidTreeWidget.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidTreeWidget.h
@@ -4,7 +4,6 @@
 #include "MantidQtMantidWidgets/WidgetDllOption.h"
 #include <MantidAPI/AnalysisDataService.h>
 #include <MantidAPI/MatrixWorkspace_fwd.h>
-#include <MantidQtMantidWidgets/MantidSurfacePlotDialog.h>
 #include <MantidQtMantidWidgets/MantidWSIndexDialog.h>
 
 #include <QTreeWidget>
@@ -29,8 +28,8 @@ public:
   QStringList getSelectedWorkspaceNames() const;
   MantidWSIndexWidget::UserInput
   chooseSpectrumFromSelected(bool showWaterfallOpt = true,
-                             bool showPlotAll = true,
-                             bool showTiledOpt = true) const;
+                             bool showPlotAll = true, bool showTiledOpt = true,
+                             bool isAdvanced = false) const;
   void setSortScheme(MantidItemSortScheme);
   void setSortOrder(Qt::SortOrder);
   MantidItemSortScheme getSortScheme() const;
@@ -41,16 +40,10 @@ public:
   void dropEvent(QDropEvent *de) override;
   QList<boost::shared_ptr<const Mantid::API::MatrixWorkspace>>
   getSelectedMatrixWorkspaces() const;
-  MantidSurfacePlotDialog::UserInputSurface
-  chooseSurfacePlotOptions(int nWorkspaces) const;
-  MantidSurfacePlotDialog::UserInputSurface
-  chooseContourPlotOptions(int nWorkspaces) const;
 
 protected:
   void dragMoveEvent(QDragMoveEvent *de) override;
   void dragEnterEvent(QDragEnterEvent *de) override;
-  MantidSurfacePlotDialog::UserInputSurface
-  choosePlotOptions(const QString &type, int nWorkspaces) const;
 
 private:
   QPoint m_dragStartPosition;
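
With the surface/contour helpers removed from the tree widget, the single chooseSpectrumFromSelected() entry point now covers the advanced options as well. A sketch of how a caller might branch on the returned selections; the field names come from MantidWSIndexWidget::UserInput as extended below, and the free function is hypothetical:

#include "MantidQtMantidWidgets/MantidTreeWidget.h"

// Sketch: one dialog call, then dispatch on the chosen plot type.
void plotFromTreeSelection(MantidQt::MantidWidgets::MantidTreeWidget &tree) {
  const auto input = tree.chooseSpectrumFromSelected(
      /*showWaterfallOpt=*/true, /*showPlotAll=*/true, /*showTiledOpt=*/true,
      /*isAdvanced=*/true);
  if (input.surface) {
    // forward input.advanced to MantidDisplayBase::plotSurface(...)
  } else if (input.contour) {
    // forward input.advanced to MantidDisplayBase::plotContour(...)
  } else {
    // existing 1D / waterfall / tiled handling via input.plots etc.
  }
}
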
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidWSIndexDialog.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidWSIndexDialog.h
index 8dc059adc3396d61a3ec97398598206603d13238..9ba02a7b68a0cd31352112d71bbf5e912fc8b7ae 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidWSIndexDialog.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MantidWSIndexDialog.h
@@ -5,6 +5,7 @@
 // Includes
 //----------------------------------
 #include "MantidQtMantidWidgets/WidgetDllOption.h"
+#include "MantidAPI/MatrixWorkspace.h"
 #include <QCheckBox>
 #include <QComboBox>
 #include <QDialog>
@@ -13,6 +14,7 @@
 #include <QList>
 #include <QMap>
 #include <QString>
+#include <QGroupBox>
 #include <QVBoxLayout>
 #include <QValidator>
 
@@ -27,36 +29,32 @@ class IntervalList;
 
 /**
         The MantidWSIndexDialog class presents users with a dialog so that
-   they may
-        specify which workspace indices / spectra IDs are to be plotted by
-   Mantid.
+   they may specify which workspace indices / spectra IDs are to be plotted
+   by Mantid and the manner by which they are plotted.
 
         They are prompted with the available range(s) of indices/IDs they
-   can plot.
-        They must enter a range(s) that is(are) enclosed within those
+   can plot. They must enter range(s) enclosed within those
    ranges.
 
         "Ranges" are of a format you've probably seen when inputting page
-   numbers to
-        print into a word processing program or similar, i.e. "2, 4-6" to
-   print out
-        pages 2, 4, 5 and 6.
+   numbers to print into a word processing program or similar,
+   i.e. "2, 4-6" to print out pages 2, 4, 5 and 6.
 
         Ranges are defined by the "Interval" and "IntervalList" classes.
 
         The IntervalListValidator class overrides QValidator, and allows
-   Mantid
-        to assertain whether a user has attempted to input a valid range or
-   not.
-        Altering this class will affect the behaviour of what is allowed to
-   be
-   typed,
-        and what inputs allow the "OK" button to be pressed.
+   Mantid to ascertain whether a user has attempted to input a valid range
+   or not. Altering this class will affect the behaviour of what is allowed
+   to be typed, and what inputs allow the "OK" button to be pressed.
 
         TODO - perhaps the interval objects are useful elsewhere, in which
-   case
-   those
-                   three classes are best in thier own header and source.
+   case those three classes are best in their own header and source.
+
+        This dialog also lets the user choose how the data are plotted:
+   as a simple 1D plot, a waterfall plot or a tiled plot.
+   The advanced form of the dialog additionally offers surface and
+   contour plots, and allows a log value to be used on the plot axis
+   instead of the workspace index or spectrum number.
 
         @author Peter G Parker, ISIS, RAL
         @date 2011/10/06
@@ -169,6 +167,8 @@ public:
   /// IntervalList
   /// object.
   void setIntervalList(const IntervalList &);
+  /// Clears the interval list
+  void clear();
 
   /// Returns a set of ints that represents the interval.
   std::set<int> getIntSet() const;
@@ -223,13 +223,12 @@ private:
 class EXPORT_OPT_MANTIDQT_MANTIDWIDGETS MantidWSIndexWidget : public QWidget {
   Q_OBJECT
 
-  /** Auxiliar class to wrap the QLine allowing to have a warn to the user
- * for
- *  invalid inputs.
-*/
+  /** Auxiliary class wrapping the QLineEdit so that a warning can be
+   * shown to the user for invalid inputs.
+   */
   class QLineEditWithErrorMark : public QWidget {
   public:
-    /// constructor that will join togheter the QLineEdit and an 'invisible'
+    /// constructor to join together the QLineEdit and an 'invisible'
     /// *
     /// label.
     explicit QLineEditWithErrorMark(QWidget *parent = 0);
@@ -248,41 +247,85 @@ class EXPORT_OPT_MANTIDQT_MANTIDWIDGETS MantidWSIndexWidget : public QWidget {
 
 public:
   /**
-        * POD structure to hold all user-selected input
-        */
+   * Plain old data structures to hold all user-selected input
+   */
+
+  /// Struct to hold user input
+  struct UserInputAdvanced {
+    bool accepted;
+    int plotIndex;
+    QString axisName;
+    QString logName;
+    std::set<double> customLogValues;
+    QList<QString> workspaceNames;
+  };
+
   struct UserInput {
     QMultiMap<QString, std::set<int>> plots;
+    bool simple;
     bool waterfall;
     bool tiled;
+    bool errors;
+    bool surface;
+    bool contour;
+    bool isAdvanced;
+    UserInputAdvanced advanced;
   };
 
+  /// The string "Workspace index"
+  static const QString WORKSPACE_NAME;
+  static const QString WORKSPACE_INDEX;
+  /// The string "Custom"
+  static const QString CUSTOM;
+  /// Strings for plot types
+  static const QString SIMPLE_PLOT;
+  static const QString WATERFALL_PLOT;
+  static const QString SURFACE_PLOT;
+  static const QString CONTOUR_PLOT;
+
   /// Constructor - same parameters as one of the parent constructors, along
   /// with a
   /// list of the names of workspaces to be plotted.
-  MantidWSIndexWidget(QWidget *parent, Qt::WFlags flags, QList<QString> wsNames,
+  MantidWSIndexWidget(QWidget *parent, Qt::WFlags flags,
+                      const QList<QString> &wsNames,
                       const bool showWaterfallOption = false,
-                      const bool showTiledOption = false);
+                      const bool showTiledOption = false,
+                      const bool isAdvanced = false);
 
   /// Returns a structure holding all of the selected options
-  UserInput getSelections() const;
+  UserInput getSelections();
   /// Returns the QMultiMap that contains all the workspaces that are to be
   /// plotted,
   /// mapped to the set of workspace indices.
   QMultiMap<QString, std::set<int>> getPlots() const;
+  /// Returns whether the simple 1D plot option has been selected
+  bool is1DPlotSelected() const;
   /// Returns whether the waterfall option has been selected
   bool isWaterfallPlotSelected() const;
   /// Called by dialog when plot requested
   bool plotRequested();
   /// Called by dialog when plot all requested
-  void plotAllRequested();
+  bool plotAllRequested();
+  /// Validate plot options when either plot or plot all is requested
+  bool validatePlotOptions();
   /// Returns whether the tiled plot option has been selected
   bool isTiledPlotSelected() const;
+  /// Returns whether surface plot is selected
+  bool isSurfacePlotSelected() const;
+  /// Returns whether contour plot is selected
+  bool isContourPlotSelected() const;
+  /// Returns whether the error bars option has been selected
+  bool isErrorBarsSelected() const;
 
 private slots:
   /// Called when the wsField has been edited.
   void editedWsField();
   /// Called when the spectraField has been edited.
   void editedSpectraField();
+  /// Called when the log selection is changed.
+  void onLogSelected(const QString &logName);
+  /// Called when the plot option has changed.
+  void onPlotOptionChanged(const QString &plotOption);
 
 private:
   /// Initializes the layout of the dialog
@@ -293,18 +336,36 @@ private:
   void initSpectraBox();
   /// Initialize the layout of the options check boxes
   void initOptionsBoxes();
+  /// Initializes the layout of the log options
+  void initLogs();
+  /// Populate the log combo box
+  void populateLogComboBox();
+  /// Get a handle to a workspace by name
+  Mantid::API::MatrixWorkspace_const_sptr
+  getWorkspace(const QString &workspaceName) const;
+  /// Check if workspaces are suitable for contour or surface plot
+  bool isSuitableForContourOrSurfacePlot() const;
+  /// Check if workspaces are suitable for use of log values
+  bool isSuitableForLogValues(const QString &plotOption) const;
+  /// Gets the axis name
+  const QString getAxisName() const;
+  /// Gets the log name
+  const QString getLogName() const;
+  /// Get the set of custom log values
+  const std::set<double> getCustomLogValues() const;
+  /// Show a warning if the plot options are invalid.
+  void showPlotOptionsError(const QString &message);
+  /// Get the plot index
+  int getPlotIndex() const;
 
   /// Check to see if all workspaces have a spectrum axis
   void checkForSpectraAxes();
 
   /// Generates an IntervalList which defines which workspace indices the
-  /// user
-  /// can
-  /// ask to plot.
+  /// user can ask to plot.
   void generateWsIndexIntervals();
-  /// Generates an IntervalList which defines which spectra IDs the user can
-  /// ask
-  /// to plot.
+  /// Generates an IntervalList which defines which spectra IDs the
+  /// user can ask to plot.
   void generateSpectraNumIntervals();
 
   /// Whether or not there are any common spectra IDs between workspaces.
@@ -319,19 +380,28 @@ private:
   /// Do we allow the display of the tiled option
   bool m_tiled;
 
+  /// Is the plotting advanced?
+  bool m_advanced;
+
   /// Pointers to the obligatory Qt objects:
-  QLabel *m_wsMessage, *m_spectraMessage, *m_orMessage;
-  QLineEditWithErrorMark *m_wsField, *m_spectraField;
-  QVBoxLayout *m_outer, *m_wsBox, *m_spectraBox;
-  QHBoxLayout *m_optionsBox;
-  QComboBox *m_plotOptions;
+  QLabel *m_wsMessage, *m_spectraMessage, *m_orMessage, *m_plotOptionLabel,
+      *m_logLabel, *m_customLogLabel, *m_axisLabel;
+  QLineEditWithErrorMark *m_wsField, *m_spectraField, *m_logValues;
+  QGroupBox *m_logOptionsGroup;
+  QVBoxLayout *m_outer, *m_wsBox, *m_spectraBox, *m_logBox, *m_optionsBox;
+  QComboBox *m_plotOptions, *m_logSelector;
+  QCheckBox *m_showErrorBars;
+  QLineEditWithErrorMark *m_axisNameEdit;
 
   /// A list of names of workspaces which are to be plotted.
   QList<QString> m_wsNames;
-  /// IntervalLists for the range of indices/IDs AVAILABLE to the user.
+  /// IntervalLists for the range of indices/numbers AVAILABLE to the user.
   IntervalList m_wsIndexIntervals, m_spectraNumIntervals;
-  /// IntervalLists for the range of indices/IDs CHOSEN by the user.
-  IntervalList m_wsIndexChoice, m_spectraIdChoice;
+  /// IntervalLists for the range of indices/numbers CHOSEN by the user.
+  IntervalList m_wsIndexChoice, m_spectraNumChoice;
+  /// Flags indicating which of the two interval lists above was chosen
+  /// by the user
+  bool m_usingWsIndexChoice = false, m_usingSprectraNumChoice = false;
 };
 
 class EXPORT_OPT_MANTIDQT_MANTIDWIDGETS MantidWSIndexDialog : public QDialog {
@@ -339,20 +409,30 @@ class EXPORT_OPT_MANTIDQT_MANTIDWIDGETS MantidWSIndexDialog : public QDialog {
 
 public:
   /// Constructor - has a list of the names of workspaces to be plotted.
-  MantidWSIndexDialog(QWidget *parent, Qt::WFlags flags, QList<QString> wsNames,
+  MantidWSIndexDialog(QWidget *parent, Qt::WFlags flags,
+                      const QList<QString> &wsNames,
                       const bool showWaterfallOption = false,
                       const bool showPlotAll = true,
-                      const bool showTiledOption = false);
+                      const bool showTiledOption = false,
+                      const bool isAdvanced = false);
   /// Returns a structure holding all of the selected options
-  MantidWSIndexWidget::UserInput getSelections() const;
+  MantidWSIndexWidget::UserInput getSelections();
   /// Returns the QMultiMap that contains all the workspaces that are to be
   /// plotted,
   /// mapped to the set of workspace indices.
   QMultiMap<QString, std::set<int>> getPlots() const;
   /// Returns whether the waterfall option has been selected
+  bool is1DPlotSelected() const;
+  /// Returns whether the waterfall option has been selected
   bool isWaterfallPlotSelected() const;
   /// Returns whether the tiled plot option has been selected
   bool isTiledPlotSelected() const;
+  /// Returns whether surface plot is selected
+  bool isSurfacePlotSelected() const;
+  /// Returns whether contour plot is selected
+  bool isContourPlotSelected() const;
+  /// Returns whether error bars have been selected
+  bool isErrorBarsSelected() const;
 private slots:
   /// Called when the OK button is pressed.
   void plot();
@@ -362,7 +442,7 @@ private slots:
 private:
   MantidWSIndexWidget m_widget;
   /// Initializes the layout of the dialog
-  void init();
+  void init(bool isAdvanced);
   /// Initializes the layout of the buttons
   void initButtons();
   /// Do we allow the display of the "Plot all" button
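
The advanced selections come back through the extended UserInput struct, whose advanced member carries everything the new MantidDisplayBase::plotSurface()/plotContour() overloads need. A minimal forwarding sketch; the free function and variable names are placeholders:

#include "MantidQtMantidWidgets/MantidDisplayBase.h"
#include "MantidQtMantidWidgets/MantidWSIndexDialog.h"

using MantidQt::MantidWidgets::MantidWSIndexWidget;

// Sketch: hand the advanced dialog selections straight to the display layer.
void forwardAdvancedSelection(
    MantidQt::MantidWidgets::MantidDisplayBase &display,
    const MantidWSIndexWidget::UserInput &options) {
  if (!options.isAdvanced)
    return;
  const auto &adv = options.advanced;
  if (options.surface)
    display.plotSurface(adv.accepted, adv.plotIndex, adv.axisName, adv.logName,
                        adv.customLogValues, adv.workspaceNames);
  else if (options.contour)
    display.plotContour(adv.accepted, adv.plotIndex, adv.axisName, adv.logName,
                        adv.customLogValues, adv.workspaceNames);
}
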
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h
index 5db55ca48cb933050c980c02f4fceec8298ee45d..2b0a1ff363daa59eafab3ae05e96d9ced741a3ac 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h
@@ -3,8 +3,10 @@
 
 #include "ui_MuonFitDataSelector.h"
 #include "WidgetDllOption.h"
-#include "MantidQtMantidWidgets/IMuonFitDataSelector.h"
 #include "MantidQtAPI/MantidWidget.h"
+#include "MantidQtMantidWidgets/FitPropertyBrowser.h"
+#include "MantidQtMantidWidgets/MuonFitPropertyBrowser.h"
+#include "MantidQtMantidWidgets/IMuonFitDataSelector.h"
 
 namespace MantidQt {
 namespace MantidWidgets {
@@ -40,12 +42,13 @@ class EXPORT_OPT_MANTIDQT_MANTIDWIDGETS MuonFitDataSelector
     : public MantidQt::API::MantidWidget,
       public IMuonFitDataSelector {
   Q_OBJECT
+
 public:
   /// Basic constructor
   explicit MuonFitDataSelector(QWidget *parent);
   /// Constructor with more options
-  MuonFitDataSelector(QWidget *parent, int runNumber, const QString &instName,
-                      size_t numPeriods, const QStringList &groups);
+  MuonFitDataSelector(QWidget *parent, int runNumber, const QString &instName);
+  // (numPeriods and groups are now handled by MuonFitPropertyBrowser)
   // --- MantidWidget methods ---
   /// Get user input through a common interface
   QVariant getUserInput() const override;
@@ -60,14 +63,11 @@ public:
   double getEndTime() const override;
   /// Get names of chosen groups
   QStringList getChosenGroups() const override;
-  /// Set chosen group
-  void setChosenGroup(const QString &group) override;
-  /// Clear list of selected groups
-  void clearChosenGroups() const;
+  /// Set the chosen groups/periods
+  void setGroupsSelected(QStringList groups) { m_chosenGroups = groups; }
+  void setPeriodsSelected(QStringList periods) { m_chosenPeriods = periods; }
   /// Get selected periods
   QStringList getPeriodSelections() const override;
-  /// Set selected period
-  void setChosenPeriod(const QString &period) override;
   /// Get type of fit
   IMuonFitDataSelector::FitType getFitType() const override;
   /// Get instrument name
@@ -88,13 +88,9 @@ public:
   bool askUserWhetherToOverwrite() override;
 
 public slots:
-  /// Set number of periods in data
-  void setNumPeriods(size_t numPeriods) override;
   /// Set starting run number, instrument and (optionally) file path
   void setWorkspaceDetails(const QString &runNumbers, const QString &instName,
                            const boost::optional<QString> &filePath) override;
-  /// Set names of available groups
-  void setAvailableGroups(const QStringList &groupNames) override;
   /// Set start time for fit
   void setStartTime(double start) override;
   /// Set end time for fit
@@ -105,8 +101,6 @@ public slots:
   void setEndTimeQuietly(double end) override;
   /// Called when user changes runs
   void userChangedRuns();
-  /// Called when period combination box checked/unchecked
-  void periodCombinationStateChanged(int state);
   /// Called when fit type changed
   void fitTypeChanged(bool state);
   /// Called when group/period box selection changes
@@ -115,10 +109,6 @@ public slots:
 signals:
   /// Edited the start or end fields
   void dataPropertiesChanged();
-  /// Changed the groups selection
-  void selectedGroupsChanged();
-  /// Changed the periods selection
-  void selectedPeriodsChanged();
   /// Changed the workspace
   void workspaceChanged();
   /// Simultaneous fit label changed
@@ -127,30 +117,20 @@ signals:
   void datasetIndexChanged(int index);
 
 private:
-  /// Add a checkbox to Groups section
-  void addGroupCheckbox(const QString &name);
-  /// Clear all checkboxes from Groups section
-  void clearGroupCheckboxes();
-  /// Set visibility of "Periods" section
-  void setPeriodVisibility(bool visible);
   /// Set default values in some input controls
   void setDefaultValues();
-  /// Set up validators for input
-  void setUpValidators();
   /// Set up connections for signals/slots
   void setUpConnections();
   /// Set type for fit
   void setFitType(IMuonFitDataSelector::FitType type);
-  /// Check/uncheck "Combination" box and enable/disable text boxes
-  void setPeriodCombination(bool on);
   /// Set busy cursor and disable input
   void setBusyState();
   /// Member - user interface
   Ui::MuonFitDataSelector m_ui;
-  /// Map of group names to checkboxes
-  QMap<QString, QCheckBox *> m_groupBoxes;
-  /// Map of period names to checkboxes
-  QMap<QString, QCheckBox *> m_periodBoxes;
+  double m_startX;
+  double m_endX;
+  QStringList m_chosenGroups;
+  QStringList m_chosenPeriods;
 
 private slots:
   /// Set normal cursor and enable input
@@ -164,4 +144,4 @@ private slots:
 } // namespace MantidWidgets
 } // namespace MantidQt
 
-#endif /* MANTID_MANTIDWIDGETS_MUONFITDATASELECTOR_H_ */
\ No newline at end of file
+#endif /* MANTID_MANTIDWIDGETS_MUONFITDATASELECTOR_H_ */
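
With the group/period checkboxes removed, the selector simply stores the chosen lists, as the updated presenter tests earlier in this patch exercise. A usage sketch; the free function is hypothetical, and it is assumed the getters return the stored lists:

#include "MantidQtMantidWidgets/MuonFitDataSelector.h"

// Sketch: selections are now plain setters/getters on the widget.
void selectDefaults(MantidQt::MantidWidgets::MuonFitDataSelector &selector) {
  selector.setGroupsSelected(QStringList{"fwd"});
  selector.setPeriodsSelected(QStringList{"1"});
  const QStringList groups = selector.getChosenGroups();      // {"fwd"}
  const QStringList periods = selector.getPeriodSelections(); // {"1"}
  Q_UNUSED(groups);
  Q_UNUSED(periods);
}
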
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.ui b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.ui
index c889629f62a539fc5dbf15890af182410ae221a9..fc91bd25b66f2a9640383295acf4be3140b6a1f0 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.ui
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.ui
@@ -21,258 +21,14 @@
      </property>
      <layout class="QVBoxLayout" name="verticalLayout">
       <item>
-       <layout class="QHBoxLayout" name="horizontalLayoutData">
-        <item>
-         <widget class="QGroupBox" name="groupBoxWorkspaces">
-          <property name="title">
-           <string>Workspaces</string>
-          </property>
-          <layout class="QVBoxLayout" name="verticalLayoutWorkspaces">
-           <item>
-            <layout class="QHBoxLayout" name="horizontalLayoutRuns">
-             <item>
-              <widget class="QLabel" name="lblRuns">
-               <property name="text">
-                <string>Runs:</string>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="MantidQt::API::MWRunFiles" name="runs" native="true">
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="Expanding" vsizetype="Preferred">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-               <property name="label" stdset="0">
-                <string/>
-               </property>
-               <property name="multipleFiles" stdset="0">
-                <bool>true</bool>
-               </property>
-               <property name="findRunFiles" stdset="0">
-                <bool>true</bool>
-               </property>
-              </widget>
-             </item>
-            </layout>
-           </item>
-           <item>
-            <layout class="QHBoxLayout" name="horizontalLayoutRadioButtons">
-             <item>
-              <widget class="QRadioButton" name="rbCoAdd">
-               <property name="text">
-                <string>Co-add</string>
-               </property>
-               <property name="checked">
-                <bool>true</bool>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QRadioButton" name="rbSimultaneous">
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="MinimumExpanding" vsizetype="Fixed">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-               <property name="minimumSize">
-                <size>
-                 <width>0</width>
-                 <height>0</height>
-                </size>
-               </property>
-               <property name="text">
-                <string>Simultaneous</string>
-               </property>
-              </widget>
-             </item>
-            </layout>
-           </item>
-           <item>
-            <layout class="QHBoxLayout" name="horizontalLayoutTime">
-             <item>
-              <widget class="QLabel" name="lblStart">
-               <property name="text">
-                <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Start (us):&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QLineEdit" name="txtStart">
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <spacer name="horizontalSpacerTime">
-               <property name="orientation">
-                <enum>Qt::Horizontal</enum>
-               </property>
-               <property name="sizeHint" stdset="0">
-                <size>
-                 <width>40</width>
-                 <height>20</height>
-                </size>
-               </property>
-              </spacer>
-             </item>
-             <item>
-              <widget class="QLabel" name="lblEnd">
-               <property name="text">
-                <string>End (us):</string>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QLineEdit" name="txtEnd">
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-              </widget>
-             </item>
-            </layout>
-           </item>
-          </layout>
-         </widget>
-        </item>
-        <item>
-         <widget class="QGroupBox" name="groupBoxGroups">
-          <property name="title">
-           <string>Groups</string>
-          </property>
-          <layout class="QVBoxLayout" name="verticalLayoutGroups">
-           <item>
-            <widget class="QCheckBox" name="chkFwd">
-             <property name="text">
-              <string>fwd</string>
-             </property>
-            </widget>
-           </item>
-           <item>
-            <widget class="QCheckBox" name="chkBwd">
-             <property name="text">
-              <string>bwd</string>
-             </property>
-            </widget>
-           </item>
-          </layout>
-         </widget>
-        </item>
-        <item>
-         <widget class="QGroupBox" name="groupBoxPeriods">
-          <property name="title">
-           <string>Periods</string>
-          </property>
-          <layout class="QVBoxLayout" name="verticalLayoutPeriods">
-           <item>
-            <widget class="QCheckBox" name="chk1">
-             <property name="text">
-              <string>1</string>
-             </property>
-            </widget>
-           </item>
-           <item>
-            <widget class="QCheckBox" name="chk2">
-             <property name="text">
-              <string>2</string>
-             </property>
-            </widget>
-           </item>
-           <item>
-            <layout class="QHBoxLayout" name="horizontalLayoutPeriodsCombine">
-             <item>
-              <widget class="QCheckBox" name="chkCombine">
-               <property name="text">
-                <string>Combine</string>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QLineEdit" name="txtFirst">
-               <property name="enabled">
-                <bool>false</bool>
-               </property>
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="Maximum" vsizetype="Fixed">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QLabel" name="lblMinus">
-               <property name="text">
-                <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-size:12pt; font-weight:600;&quot;&gt;-&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QLineEdit" name="txtSecond">
-               <property name="enabled">
-                <bool>false</bool>
-               </property>
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="Maximum" vsizetype="Fixed">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-              </widget>
-             </item>
-            </layout>
-           </item>
-          </layout>
-         </widget>
-        </item>
-       </layout>
-      </item>
-      <item>
-       <layout class="QHBoxLayout" name="horizontalLayoutLabel">
-        <item>
-         <widget class="QLabel" name="lblSimFitLabel">
-          <property name="text">
-           <string>Label for simultaneous fit: </string>
-          </property>
-         </widget>
-        </item>
+       <layout class="QHBoxLayout" name="horizontalLayoutDataset">
         <item>
-         <widget class="QLineEdit" name="txtSimFitLabel">
-          <property name="enabled">
-           <bool>false</bool>
-          </property>
+         <widget class="QLabel" name="label">
           <property name="text">
-           <string>0</string>
+           <string>Display Parameters For:</string>
           </property>
          </widget>
         </item>
-       </layout>
-      </item>
-      <item>
-       <layout class="QHBoxLayout" name="horizontalLayoutDataset">
-        <item>
-         <spacer name="horizontalSpacerDatasetLeft">
-          <property name="orientation">
-           <enum>Qt::Horizontal</enum>
-          </property>
-          <property name="sizeHint" stdset="0">
-           <size>
-            <width>40</width>
-            <height>20</height>
-           </size>
-          </property>
-         </spacer>
-        </item>
         <item>
          <widget class="QPushButton" name="btnPrevDataset">
           <property name="sizePolicy">
@@ -319,7 +75,7 @@
           </property>
           <property name="sizeHint" stdset="0">
            <size>
-            <width>40</width>
+            <width>30</width>
             <height>20</height>
            </size>
           </property>
@@ -327,6 +83,132 @@
         </item>
        </layout>
       </item>
+      <item>
+       <layout class="QHBoxLayout" name="horizontalLayoutLabel">
+        <item>
+         <widget class="QLabel" name="lblSimFitLabel">
+          <property name="text">
+           <string>Label for simultaneous fit: </string>
+          </property>
+         </widget>
+        </item>
+        <item>
+         <widget class="QLineEdit" name="txtSimFitLabel">
+          <property name="enabled">
+           <bool>false</bool>
+          </property>
+          <property name="text">
+           <string>0</string>
+          </property>
+         </widget>
+        </item>
+       </layout>
+      </item>
+      <item>
+       <layout class="QHBoxLayout" name="horizontalLayoutData">
+        <item>
+         <layout class="QHBoxLayout" name="horizontalLayout">
+          <item>
+           <layout class="QHBoxLayout" name="horizontalLayout_2">
+            <item>
+             <layout class="QHBoxLayout" name="horizontalLayoutRuns">
+              <item>
+               <widget class="QLabel" name="lblRuns">
+                <property name="text">
+                 <string>Runs:</string>
+                </property>
+               </widget>
+              </item>
+              <item>
+               <widget class="MantidQt::API::MWRunFiles" name="runs" native="true">
+                <property name="sizePolicy">
+                 <sizepolicy hsizetype="Expanding" vsizetype="Preferred">
+                  <horstretch>0</horstretch>
+                  <verstretch>0</verstretch>
+                 </sizepolicy>
+                </property>
+                <property name="label" stdset="0">
+                 <string/>
+                </property>
+                <property name="multipleFiles" stdset="0">
+                 <bool>true</bool>
+                </property>
+                <property name="findRunFiles" stdset="0">
+                 <bool>true</bool>
+                </property>
+               </widget>
+              </item>
+             </layout>
+            </item>
+           </layout>
+          </item>
+         </layout>
+        </item>
+       </layout>
+      </item>
+      <item>
+       <layout class="QHBoxLayout" name="horizontalLayout_4">
+        <item>
+         <layout class="QHBoxLayout" name="horizontalLayoutRadioButtons">
+          <item>
+           <spacer name="horizontalSpacer_2">
+            <property name="orientation">
+             <enum>Qt::Horizontal</enum>
+            </property>
+            <property name="sizeHint" stdset="0">
+             <size>
+              <width>40</width>
+              <height>20</height>
+             </size>
+            </property>
+           </spacer>
+          </item>
+          <item>
+           <widget class="QRadioButton" name="rbCoAdd">
+            <property name="text">
+             <string>Co-add</string>
+            </property>
+            <property name="checked">
+             <bool>true</bool>
+            </property>
+           </widget>
+          </item>
+          <item>
+           <widget class="QRadioButton" name="rbSimultaneous">
+            <property name="sizePolicy">
+             <sizepolicy hsizetype="MinimumExpanding" vsizetype="Fixed">
+              <horstretch>0</horstretch>
+              <verstretch>0</verstretch>
+             </sizepolicy>
+            </property>
+            <property name="minimumSize">
+             <size>
+              <width>0</width>
+              <height>0</height>
+             </size>
+            </property>
+            <property name="text">
+             <string>Simultaneous</string>
+            </property>
+           </widget>
+          </item>
+          <item>
+           <spacer name="horizontalSpacer">
+            <property name="orientation">
+             <enum>Qt::Horizontal</enum>
+            </property>
+            <property name="sizeHint" stdset="0">
+             <size>
+              <width>40</width>
+              <height>20</height>
+             </size>
+            </property>
+           </spacer>
+          </item>
+         </layout>
+        </item>
+       </layout>
+      </item>
      </layout>
     </widget>
    </item>
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitPropertyBrowser.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitPropertyBrowser.h
index 7ddcbcfacbb2e3df4c1db27a1e1b612d29114e1c..341e858c6e877d5b7b87791a1c95e7b5ab5fe0a5 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitPropertyBrowser.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitPropertyBrowser.h
@@ -5,10 +5,12 @@
 #include "MantidQtMantidWidgets/IMuonFitDataModel.h"
 #include "MantidQtMantidWidgets/IMuonFitFunctionModel.h"
 
+#include <QMap>
 /* Forward declarations */
 class QDockWidget;
 class QLabel;
 class QPushButton;
+class QCheckBox;
 class QMenu;
 class QSignalMapper;
 class QtTreePropertyBrowser;
@@ -21,7 +23,9 @@ class QtEnumPropertyManager;
 class QtProperty;
 class QtBrowserItem;
 class QVBoxLayout;
+class QGroupBox;
 class QSplitter;
+class QWidget;
 
 namespace Mantid {
 namespace API {
@@ -97,13 +101,33 @@ public:
   virtual void setFitEnabled(bool yes) override;
 
   void doTFAsymmFit(int maxIterations);
+  void setAvailableGroups(const QStringList &groups);
+  void setAvailablePeriods(const QStringList &periods);
 
+  QStringList getChosenGroups() const;
+  QStringList getChosenPeriods() const;
+
+  /// Clear list of selected groups
+  void clearChosenGroups() const;
+  void setAllGroups();
+  void setAllPairs();
+  void clearChosenPeriods() const;
+  void setChosenGroup(QString &group);
+  void setChosenPeriods(const QString &period);
+  void setSingleFitLabel(std::string name);
 public slots:
   /// Perform the fit algorithm
   void fit() override;
   /// Open sequential fit dialog
   void sequentialFit() override;
+
   void executeFitMenu(const QString &item) override;
+  void groupBtnPressed();
+  void periodBtnPressed();
+  void generateBtnPressed();
+  void combineBtnPressed();
+  void setNumPeriods(size_t numPeriods);
+
 signals:
   /// Emitted when sequential fit is requested by user
   void sequentialFitRequested();
@@ -117,6 +141,9 @@ signals:
   void userChangedDatasetIndex(int index) override;
   /// Emitted when "fit to raw data" is changed
   void fitRawDataClicked(bool enabled) override;
+  void groupBoxClicked();
+  void periodBoxClicked();
+  void reselctGroupClicked(bool enabled);
   /// Emitted when fit is about to be run
   void preFitChecksRequested(bool sequential) override;
 
@@ -127,12 +154,14 @@ protected:
 private slots:
   void doubleChanged(QtProperty *prop) override;
   void boolChanged(QtProperty *prop) override;
+  void enumChanged(QtProperty *prop) override;
 
 private:
   /// new menu option
   QAction *m_fitActionTFAsymm;
   /// override populating fit menu
   void populateFitMenuButton(QSignalMapper *fitMapper, QMenu *fitMenu) override;
+
   /// Get the registered function names
   void populateFunctionNames() override;
   /// Check if the workspace can be used in the fit
@@ -141,6 +170,18 @@ private:
   /// workspaces
   void finishAfterSimultaneousFit(const Mantid::API::IAlgorithm *fitAlg,
                                   const int nWorkspaces) const;
+
+  void clearGroupCheckboxes();
+  void addGroupCheckbox(const QString &name);
+  void genGroupWindow();
+  void genPeriodWindow();
+  void genCombinePeriodWindow();
+  void updateGroupDisplay();
+  void updatePeriodDisplay();
+  void setChosenPeriods(const QStringList &chosenPeriods);
+  void clearPeriodCheckboxes();
+  void addPeriodCheckbox(const QString &name);
+
   /// Splitter for additional widgets and splitter between this and browser
   QSplitter *m_widgetSplitter, *m_mainSplitter;
   /// Names of workspaces to fit
@@ -148,7 +189,35 @@ private:
   /// Label to use for simultaneous fits
   std::string m_simultaneousLabel;
   QtProperty *m_normalization;
-  mutable QStringList m_normalizationValue;
+  QStringList m_normalizationValue;
+
+  QtBrowserItem *m_multiFitSettingsGroup;
+  QtProperty *m_groupsToFit;
+  QStringList m_groupsToFitOptions;
+  /// Map of group names to checkboxes
+  QMap<QString, QtProperty *> m_groupBoxes;
+  QtProperty *m_showGroup;
+  QStringList m_showGroupValue;
+
+  QtProperty *m_periodsToFit;
+  QStringList m_periodsToFitOptions;
+  /// Map of period names to checkboxes
+  QMap<QString, QtProperty *> m_periodBoxes;
+  QtProperty *m_showPeriods;
+  QStringList m_showPeriodValue;
+  QLineEdit *m_positiveCombo;
+  QLineEdit *m_negativeCombo;
+
+  QPushButton *m_reselectGroupBtn;
+  QPushButton *m_reselectPeriodBtn;
+  QPushButton *m_generateBtn;
+  QGroupBox *m_btnGroup;
+  QDialog *m_groupWindow;
+  QDialog *m_periodWindow;
+  QDialog *m_comboWindow;
+
+  std::vector<std::string> m_groupsList = {"fwd", "bkwd", "top", "bottom",
+                                           "bwd"};
 };
 
 std::vector<double> readNormalization();
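
Group and period management now lives in MuonFitPropertyBrowser itself. A rough sketch of how a presenter might populate and query it, using only the members declared above; the function name is a placeholder, and it is assumed setNumPeriods() fills the period checkboxes the browser now owns:

#include "MantidQtMantidWidgets/MuonFitPropertyBrowser.h"

// Sketch: the browser, not the data selector, now tracks groups and periods.
void populateMuonBrowser(
    MantidQt::MantidWidgets::MuonFitPropertyBrowser &browser) {
  browser.setAvailableGroups(QStringList{"fwd", "bwd"});
  browser.setNumPeriods(2);
  const QStringList chosenGroups = browser.getChosenGroups();
  const QStringList chosenPeriods = browser.getChosenPeriods();
  Q_UNUSED(chosenGroups);
  Q_UNUSED(chosenPeriods);
}
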
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/IWorkspaceDockView.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/IWorkspaceDockView.h
index 5f67ee91f2e7b88ed165e439c98f0da77dd9bfad..244b6248ee06a017b9f519ea82ac40439e180768 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/IWorkspaceDockView.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/IWorkspaceDockView.h
@@ -88,7 +88,7 @@ public:
   virtual void showWorkspaceData() = 0;
   virtual void showInstrumentView() = 0;
   virtual void saveToProgram() = 0;
-  virtual void plotSpectrum(bool showErrors) = 0;
+  virtual void plotSpectrum(std::string type) = 0;
   virtual void showColourFillPlot() = 0;
   virtual void showDetectorsTable() = 0;
   virtual void showBoxDataTable() = 0;
@@ -103,8 +103,6 @@ public:
   virtual void showTransposed() = 0;
   virtual void convertToMatrixWorkspace() = 0;
   virtual void convertMDHistoToMatrixWorkspace() = 0;
-  virtual void showSurfacePlot() = 0;
-  virtual void showContourPlot() = 0;
 
   virtual bool executeAlgorithmAsync(Mantid::API::IAlgorithm_sptr alg,
                                      const bool wait = true) = 0;
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/QWorkspaceDockView.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/QWorkspaceDockView.h
index 6d3dd9aa550072b3dd39084c7e8c726a832938b1..5d84af29f5ea1a369b7b31eb97518c7b405a59b6 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/QWorkspaceDockView.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/QWorkspaceDockView.h
@@ -12,11 +12,11 @@
 #include <MantidAPI/MatrixWorkspace_fwd.h>
 #include <MantidAPI/WorkspaceGroup_fwd.h>
 
-#include <MantidQtMantidWidgets/MantidSurfacePlotDialog.h>
 #include <MantidQtMantidWidgets/WorkspacePresenter/IWorkspaceDockView.h>
 #include <QDockWidget>
 #include <QMap>
 #include <QMetaType>
+#include <QHash>
 #include <boost/enable_shared_from_this.hpp>
 #include <boost/shared_ptr.hpp>
 #include <map>
@@ -86,11 +86,6 @@ public:
   MantidQt::MantidWidgets::WorkspacePresenterWN_wptr
   getPresenterWeakPtr() override;
 
-  MantidSurfacePlotDialog::UserInputSurface
-  chooseContourPlotOptions(int nWorkspaces) const;
-  MantidSurfacePlotDialog::UserInputSurface
-  chooseSurfacePlotOptions(int nWorkspaces) const;
-
   SortDirection getSortDirection() const override;
   SortCriteria getSortCriteria() const override;
   void sortWorkspaces(SortCriteria criteria, SortDirection direction) override;
@@ -129,7 +124,7 @@ public:
   void showWorkspaceData() override;
   void saveToProgram() override;
   void showInstrumentView() override;
-  void plotSpectrum(bool showErrors) override;
+  void plotSpectrum(std::string type) override;
   void showColourFillPlot() override;
   void showDetectorsTable() override;
   void showBoxDataTable() override;
@@ -144,8 +139,6 @@ public:
   void showTransposed() override;
   void convertToMatrixWorkspace() override;
   void convertMDHistoToMatrixWorkspace() override;
-  void showSurfacePlot() override;
-  void showContourPlot() override;
 
   bool executeAlgorithmAsync(Mantid::API::IAlgorithm_sptr alg,
                              const bool wait = true) override;
@@ -176,8 +169,7 @@ private:
       QMenu *menu, const Mantid::API::IMDWorkspace_const_sptr &WS) const;
   void addPeaksWorkspaceMenuItems(
       QMenu *menu, const Mantid::API::IPeaksWorkspace_const_sptr &WS) const;
-  void addWorkspaceGroupMenuItems(
-      QMenu *menu, const Mantid::API::WorkspaceGroup_const_sptr &groupWS) const;
+  void addWorkspaceGroupMenuItems(QMenu *menu) const;
   void addTableWorkspaceMenuItems(QMenu *menu) const;
   void addClearMenuItems(QMenu *menu, const QString &wsName);
 
@@ -224,14 +216,13 @@ private slots:
   void onClickShowTransposed();
   void onClickPlotSpectra();
   void onClickPlotSpectraErr();
+  void onClickPlotAdvanced();
   void onClickDrawColorFillPlot();
   void onClickShowDetectorTable();
   void onClickConvertToMatrixWorkspace();
   void onClickConvertMDHistoToMatrixWorkspace();
   void onClickShowAlgHistory();
   void onClickShowSampleMaterial();
-  void onClickPlotSurface();
-  void onClickPlotContour();
   void onClickClearUB();
   void incrementUpdateCount();
   void filterWorkspaceTree(const QString &text);
@@ -262,13 +253,13 @@ private:
 
   // Context-menu actions
   QAction *m_showData, *m_showInst, *m_plotSpec, *m_plotSpecErr,
-      *m_showDetectors, *m_showBoxData, *m_showVatesGui, *m_showSpectrumViewer,
-      *m_showSliceViewer, *m_colorFill, *m_showLogs, *m_showSampleMaterial,
-      *m_showHist, *m_showMDPlot, *m_showListData, *m_saveNexus, *m_rename,
-      *m_delete, *m_program, *m_ascendingSortAction, *m_descendingSortAction,
-      *m_byNameChoice, *m_byLastModifiedChoice, *m_showTransposed,
-      *m_convertToMatrixWorkspace, *m_convertMDHistoToMatrixWorkspace,
-      *m_clearUB, *m_plotSurface, *m_plotContour;
+      *m_plotAdvanced, *m_showDetectors, *m_showBoxData, *m_showVatesGui,
+      *m_showSpectrumViewer, *m_showSliceViewer, *m_colorFill, *m_showLogs,
+      *m_showSampleMaterial, *m_showHist, *m_showMDPlot, *m_showListData,
+      *m_saveNexus, *m_rename, *m_delete, *m_program, *m_ascendingSortAction,
+      *m_descendingSortAction, *m_byNameChoice, *m_byLastModifiedChoice,
+      *m_showTransposed, *m_convertToMatrixWorkspace,
+      *m_convertMDHistoToMatrixWorkspace, *m_clearUB;
 
   QAtomicInt m_updateCount;
   bool m_treeUpdating;
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/ViewNotifiable.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/ViewNotifiable.h
index e368471a7f8082e9aa41fc3a3f59253c169a7c0e..1c9cebcdf592b4deb2ebd00ae6898851512daa96 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/ViewNotifiable.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/ViewNotifiable.h
@@ -52,6 +52,7 @@ public:
     ShowInstrumentView,
     PlotSpectrum,
     PlotSpectrumWithErrors,
+    PlotSpectrumAdvanced,
     ShowColourFillPlot,
     ShowDetectorsTable,
     ShowBoxDataTable,
@@ -67,8 +68,6 @@ public:
     ConvertToMatrixWorkspace,
     ConvertMDHistoToMatrixWorkspace,
     ClearUBMatrix,
-    ShowSurfacePlot,
-    ShowContourPlot,
     RefreshWorkspaces
   };
 
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/WorkspaceDockMockObjects.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/WorkspaceDockMockObjects.h
index 483e827c67457079615cd0e09337368771d0ade1..1ba82c1b71d566e68589508e07a259e506d30d0c 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/WorkspaceDockMockObjects.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/WorkspaceDockMockObjects.h
@@ -66,7 +66,7 @@ public:
   MOCK_METHOD0(showWorkspaceData, void());
   MOCK_METHOD0(showInstrumentView, void());
   MOCK_METHOD0(saveToProgram, void());
-  MOCK_METHOD1(plotSpectrum, void(bool showErrors));
+  MOCK_METHOD1(plotSpectrum, void(const std::string type));
   MOCK_METHOD0(showColourFillPlot, void());
   MOCK_METHOD0(showDetectorsTable, void());
   MOCK_METHOD0(showBoxDataTable, void());
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/WorkspacePresenter.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/WorkspacePresenter.h
index 60698106db3ad59afbc709c2c9dc93b4fa40ae5e..04fbd5b73e79a4db2e9a2e61a169b0620974e3d2 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/WorkspacePresenter.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/WorkspacePresenter/WorkspacePresenter.h
@@ -75,6 +75,7 @@ private:
   void saveToProgram();
   void plotSpectrum();
   void plotSpectrumWithErrors();
+  void plotSpectrumAdvanced();
   void showColourFillPlot();
   void showDetectorsTable();
   void showBoxDataTable();
@@ -90,8 +91,6 @@ private:
   void convertToMatrixWorkspace();
   void convertMDHistoToMatrixWorkspace();
   void clearUBMatrix();
-  void showSurfacePlot();
-  void showContourPlot();
   void refreshWorkspaces();
 
   void workspaceLoaded();
diff --git a/MantidQt/MantidWidgets/src/FitPropertyBrowser.cpp b/MantidQt/MantidWidgets/src/FitPropertyBrowser.cpp
index 40f7b16fee79644f5bc37632a48de09d77ff88fc..7ec03b415df35d7919c065cc2b5817f67706cfa6 100644
--- a/MantidQt/MantidWidgets/src/FitPropertyBrowser.cpp
+++ b/MantidQt/MantidWidgets/src/FitPropertyBrowser.cpp
@@ -1983,10 +1983,10 @@ void FitPropertyBrowser::addTieToFunction() {
   int iPar = -1;
   for (size_t i = 0; i < m_compositeFunction->nParams(); i++) {
     Mantid::API::ParameterReference ref(m_compositeFunction.get(), i);
-    Mantid::API::IFunction *fun = ref.getFunction();
+    Mantid::API::IFunction *fun = ref.getLocalFunction();
 
     // Pick out parameters with the same name as the one we're tying from
-    if (fun->parameterName(static_cast<int>(ref.getIndex())) == parName) {
+    if (fun->parameterName(static_cast<int>(ref.getLocalIndex())) == parName) {
       if (iPar == -1 &&
           fun ==
               h->function()
diff --git a/MantidQt/MantidWidgets/src/MantidSurfacePlotDialog.cpp b/MantidQt/MantidWidgets/src/MantidSurfacePlotDialog.cpp
deleted file mode 100644
index 9d48784ae6b9f38fa21a8498ce795a02031e238d..0000000000000000000000000000000000000000
--- a/MantidQt/MantidWidgets/src/MantidSurfacePlotDialog.cpp
+++ /dev/null
@@ -1,272 +0,0 @@
-#include "MantidQtMantidWidgets/MantidSurfacePlotDialog.h"
-#include "MantidAPI/ExperimentInfo.h"
-#include "MantidAPI/IMDWorkspace.h"
-#include "MantidQtMantidWidgets/MantidDisplayBase.h"
-#include "MantidAPI/Run.h"
-#include <QMessageBox>
-#include <QPushButton>
-
-using Mantid::API::IMDWorkspace;
-using Mantid::API::IMDWorkspace_sptr;
-using Mantid::API::ExperimentInfo;
-
-namespace MantidQt {
-namespace MantidWidgets {
-/// The string "Workspace index"
-const QString MantidSurfacePlotDialog::WORKSPACE_INDEX = "Workspace index";
-
-/// The string "Custom"
-const QString MantidSurfacePlotDialog::CUSTOM = "Custom";
-
-/// Minimum width for dialog to fit the title string in
-const int MantidSurfacePlotDialog::MINIMUM_WIDTH = 275;
-
-/**
- * Construct an object of this type
- * @param mui :: The MantidUI area
- * @param flags :: Window flags that are passed the the QDialog constructor
- * @param wsNames :: the names of the workspaces to be plotted
- * @param plotType :: Type of plot (for window title)
- */
-MantidSurfacePlotDialog::MantidSurfacePlotDialog(MantidDisplayBase *mui,
-                                                 Qt::WFlags flags,
-                                                 QList<QString> wsNames,
-                                                 const QString &plotType)
-    : QDialog(mui->getParent(), flags), m_widget(this, flags, wsNames, false),
-      m_mantidUI(mui), m_wsNames(wsNames), m_accepted(false) {
-  // Set up UI.
-  init(plotType);
-}
-
-/**
- * Set up layout of dialog
- * @param plotType :: Type of plot (for window title)
- */
-void MantidSurfacePlotDialog::init(const QString &plotType) {
-  m_outer = new QVBoxLayout();
-  QString title(plotType);
-  title.append(tr(" plot versus log value"));
-  setWindowTitle(title);
-  m_outer->insertWidget(1, &m_widget);
-  initLogs();
-  initButtons();
-  setLayout(m_outer);
-  this->setMinimumWidth(MINIMUM_WIDTH);
-}
-
-/**
- * Set up UI to choose a log and name of axis
- */
-void MantidSurfacePlotDialog::initLogs() {
-
-  m_logBox = new QVBoxLayout;
-  m_logLabel = new QLabel(tr("Log value to plot against:"));
-  m_logSelector = new QComboBox();
-  populateLogComboBox();
-  m_axisLabel = new QLabel(tr("<br>Label for plot axis:"));
-  m_axisNameEdit = new QLineEdit();
-  m_axisNameEdit->setText(m_logSelector->currentText());
-  m_customLogLabel = new QLabel(tr("<br>Custom log values:"));
-  m_logValues = new QLineEdit();
-
-  m_logBox->addWidget(m_logLabel);
-  m_logBox->addWidget(m_logSelector);
-  m_logBox->addWidget(m_customLogLabel);
-  m_logBox->addWidget(m_logValues);
-  m_logBox->addWidget(m_axisLabel);
-  m_logBox->addWidget(m_axisNameEdit);
-  m_outer->addItem(m_logBox);
-
-  m_logValues->setEnabled(false);
-
-  connect(m_logSelector, SIGNAL(currentIndexChanged(const QString &)), this,
-          SLOT(onLogSelected(const QString &)));
-}
-
-/**
- * Set up buttons on UI (OK/Cancel)
- */
-void MantidSurfacePlotDialog::initButtons() {
-  m_buttonBox = new QHBoxLayout;
-
-  m_okButton = new QPushButton("OK");
-  m_cancelButton = new QPushButton("Cancel");
-
-  m_buttonBox->addWidget(m_okButton);
-  m_buttonBox->addWidget(m_cancelButton);
-
-  m_outer->addItem(m_buttonBox);
-
-  connect(m_okButton, SIGNAL(clicked()), this, SLOT(plot()));
-  connect(m_cancelButton, SIGNAL(clicked()), this, SLOT(close()));
-}
-
-/**
- * Populate the log combo box with all log names that
- * have single numeric value per workspace (and occur
- * in every workspace)
- */
-void MantidSurfacePlotDialog::populateLogComboBox() {
-  // First item should be "Workspace index"
-  m_logSelector->addItem(WORKSPACE_INDEX);
-
-  // Create a table of all single-value numeric log names versus
-  // how many workspaces they appear in
-  std::map<std::string, int> logCounts;
-  for (auto wsName : m_wsNames) {
-    auto ws = m_mantidUI->getWorkspace(wsName);
-    if (ws) {
-      // It should be MatrixWorkspace, which is an ExperimentInfo
-      auto ei = boost::dynamic_pointer_cast<const ExperimentInfo>(ws);
-      if (ei) {
-        const std::vector<Mantid::Kernel::Property *> &logData =
-            ei->run().getLogData();
-        for (auto log : logData) {
-          // If this is a single-value numeric log, add it to the list of counts
-          if (dynamic_cast<Mantid::Kernel::PropertyWithValue<int> *>(log) ||
-              dynamic_cast<Mantid::Kernel::PropertyWithValue<double> *>(log)) {
-            const std::string name = log->name();
-            if (logCounts.find(name) != logCounts.end()) {
-              logCounts[name]++;
-            } else {
-              logCounts[name] = 1;
-            }
-          }
-        }
-      }
-    }
-  }
-
-  // Add the log names to the combo box if they appear in all workspaces
-  const int nWorkspaces = m_wsNames.size();
-  for (auto logCount : logCounts) {
-    if (logCount.second == nWorkspaces) {
-      m_logSelector->addItem(logCount.first.c_str());
-    }
-  }
-
-  // Add "Custom" at the end of the list
-  m_logSelector->addItem(CUSTOM);
-}
-
-/**
- * Gets the log that user selected to plot against
- * @returns Name of log, or "Workspace index"
- */
-const QString MantidSurfacePlotDialog::getLogName() const {
-  return m_logSelector->currentText();
-}
-
-/**
- * Gets the name that the user gave for the Y axis of the surface plot
- * @returns Name input by user for axis
- */
-const QString MantidSurfacePlotDialog::getAxisName() const {
-  return m_axisNameEdit->text();
-}
-
-/**
- * Returns a structure holding all of the selected options.
- * @returns Struct holding user input
- */
-MantidSurfacePlotDialog::UserInputSurface
-MantidSurfacePlotDialog::getSelections() const {
-  UserInputSurface selections;
-  selections.accepted = m_accepted;
-  selections.plotIndex = getPlot();
-  selections.axisName = getAxisName();
-  selections.logName = getLogName();
-  if (selections.logName == CUSTOM) {
-    try {
-      selections.customLogValues = getCustomLogValues();
-    } catch (const std::invalid_argument &ex) {
-      QString error("Invalid log value supplied: ");
-      showPlotOptionsError(error.append(ex.what()));
-      selections.accepted = false;
-    }
-  }
-  return selections;
-}
-
-/**
-* Returns the workspace index to be plotted
-* @returns Workspace index to be plotted
-*/
-int MantidSurfacePlotDialog::getPlot() const {
-  int spectrumIndex = 0; // default to 0
-  const auto userInput = m_widget.getPlots();
-
-  if (!userInput.empty()) {
-    const auto indexList = userInput.values();
-    if (!indexList.empty()) {
-      const auto spectrumIndexes = indexList.at(0);
-      if (!spectrumIndexes.empty()) {
-        spectrumIndex = *spectrumIndexes.begin();
-      }
-    }
-  }
-  return spectrumIndex;
-}
-
-/**
- * Called when OK button pressed
- */
-void MantidSurfacePlotDialog::plot() {
-  if (m_widget.plotRequested()) {
-    m_accepted = true;
-    accept();
-  }
-}
-
-/**
- * Called when log selection changed
- * If "Custom" selected, enable the custom log input box.
- * Otherwise, it is read-only.
- * Also put the log name into the axis name box as a default choice.
- * @param logName :: [input] Text selected in combo box
- */
-void MantidSurfacePlotDialog::onLogSelected(const QString &logName) {
-  m_logValues->setEnabled(logName == CUSTOM);
-  m_logValues->clear();
-  m_axisNameEdit->setText(logName);
-}
-
-/**
- * If "Custom" is selected as log, returns the list of values the user has input
- * into the edit box, otherwise returns an empty set.
- * Note that the set is ordered by definition, and values are only added if they
- * are successfully converted to a double.
- * @returns Set of numerical log values
- * @throws invalid_argument if values are not numeric
- */
-const std::set<double> MantidSurfacePlotDialog::getCustomLogValues() const {
-  std::set<double> logValues;
-  if (m_logSelector->currentText() == CUSTOM) {
-    QStringList values = m_logValues->text().split(',');
-    foreach (QString value, values) {
-      bool ok = false;
-      double number = value.toDouble(&ok);
-      if (ok) {
-        logValues.insert(number);
-      } else {
-        throw std::invalid_argument(value.toStdString());
-      }
-    }
-  }
-  return logValues;
-}
-
-/**
- * Displays a message box with the supplied error string.
- * @param message :: [input] Error message to display
- */
-void MantidSurfacePlotDialog::showPlotOptionsError(const QString &message) {
-  if (!message.isEmpty()) {
-    QMessageBox errorMessage;
-    errorMessage.setText(message);
-    errorMessage.setIcon(QMessageBox::Critical);
-    errorMessage.exec();
-  }
-}
-}
-}
\ No newline at end of file
diff --git a/MantidQt/MantidWidgets/src/MantidTreeWidget.cpp b/MantidQt/MantidWidgets/src/MantidTreeWidget.cpp
index 3cf6cd9acaf41f703e59465d40174db4cc830a13..5b3f5a964ad22ac1cae6b14acba85b7f38b30dcf 100644
--- a/MantidQt/MantidWidgets/src/MantidTreeWidget.cpp
+++ b/MantidQt/MantidWidgets/src/MantidTreeWidget.cpp
@@ -189,24 +189,33 @@ QList<MatrixWorkspace_const_sptr>
 MantidTreeWidget::getSelectedMatrixWorkspaces() const {
   // Check for any selected WorkspaceGroup names and replace with the names of
   // their children.
-  QSet<QString> selectedWsNames;
+  // We preserve the order, but use a set to avoid adding duplicate workspaces.
+  std::set<QString> selectedWsNameSet;
+  std::vector<QString> selectedWsNameList;
   foreach (const QString wsName, this->getSelectedWorkspaceNames()) {
     const auto groupWs = boost::dynamic_pointer_cast<const WorkspaceGroup>(
         m_ads.retrieve(wsName.toStdString()));
     if (groupWs) {
       const auto childWsNames = groupWs->getNames();
-      for (auto childWsName : childWsNames) {
-        selectedWsNames.insert(QString::fromStdString(childWsName));
+      for (const auto &childWsName : childWsNames) {
+        if (selectedWsNameSet.find(QString::fromStdString(childWsName)) ==
+            selectedWsNameSet.end()) {
+          selectedWsNameSet.insert(QString::fromStdString(childWsName));
+          selectedWsNameList.push_back(QString::fromStdString(childWsName));
+        }
       }
     } else {
-      selectedWsNames.insert(wsName);
+      if (selectedWsNameSet.find(wsName) == selectedWsNameSet.end()) {
+        selectedWsNameSet.insert(wsName);
+        selectedWsNameList.push_back(wsName);
+      }
     }
   }
 
   // Get the names of, and pointers to, the MatrixWorkspaces only.
   QList<MatrixWorkspace_const_sptr> selectedMatrixWsList;
   QList<QString> selectedMatrixWsNameList;
-  foreach (const auto selectedWsName, selectedWsNames) {
+  foreach (const auto selectedWsName, selectedWsNameList) {
     const auto matrixWs = boost::dynamic_pointer_cast<const MatrixWorkspace>(
         m_ads.retrieve(selectedWsName.toStdString()));
     if (matrixWs) {
@@ -226,11 +235,13 @@ MantidTreeWidget::getSelectedMatrixWorkspaces() const {
 * @param showPlotAll :: [input] If true, show the "Plot All" button on the
 * dialog
 * @param showTiledOpt :: [input] If true, show the "Tiled" option on the dialog
+* @param isAdvanced :: [input] If true, advanced plotting is being done
 * @return :: A MantidWSIndexDialog::UserInput structure listing the selected
 * options
 */
 MantidWSIndexWidget::UserInput MantidTreeWidget::chooseSpectrumFromSelected(
-    bool showWaterfallOpt, bool showPlotAll, bool showTiledOpt) const {
+    bool showWaterfallOpt, bool showPlotAll, bool showTiledOpt,
+    bool isAdvanced) const {
   auto selectedMatrixWsList = getSelectedMatrixWorkspaces();
   QList<QString> selectedMatrixWsNameList;
   foreach (const auto matrixWs, selectedMatrixWsList) {
@@ -238,102 +249,47 @@ MantidWSIndexWidget::UserInput MantidTreeWidget::chooseSpectrumFromSelected(
         QString::fromStdString(matrixWs->getName()));
   }
 
-  // Check to see if all workspaces have only a single spectrum ...
-  bool allSingleWorkspaces = true;
-  foreach (const auto selectedMatrixWs, selectedMatrixWsList) {
-    if (selectedMatrixWs->getNumberHistograms() != 1) {
-      allSingleWorkspaces = false;
-      break;
+  // Check workspaces to see whether to plot immediately without dialog box
+  bool plotImmediately = true;
+  if (isAdvanced) {
+    plotImmediately = selectedMatrixWsList.size() == 1 &&
+                      selectedMatrixWsList[0]->getNumberHistograms() == 1;
+  } else {
+    foreach (const auto selectedMatrixWs, selectedMatrixWsList) {
+      if (selectedMatrixWs->getNumberHistograms() != 1) {
+        plotImmediately = false;
+        break;
+      }
     }
   }
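+  // In advanced mode the dialog is only skipped for a single workspace with a
+  // single spectrum, presumably so that log/axis options can still be chosen
+  // whenever there is more than one spectrum to plot.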
 
   // ... and if so, just return all workspace names mapped to workspace index 0;
-  if (allSingleWorkspaces) {
+  if (plotImmediately) {
     const std::set<int> SINGLE_SPECTRUM = {0};
     QMultiMap<QString, std::set<int>> spectrumToPlot;
     foreach (const auto selectedMatrixWs, selectedMatrixWsList) {
       spectrumToPlot.insert(QString::fromStdString(selectedMatrixWs->getName()),
                             SINGLE_SPECTRUM);
     }
+    // ... and request a simple 1D plot
     MantidWSIndexWidget::UserInput selections;
     selections.plots = spectrumToPlot;
+    selections.simple = true;
     selections.waterfall = false;
     selections.tiled = false;
+    selections.surface = false;
+    selections.contour = false;
     return selections;
   }
 
   // Else, one or more workspaces
   auto dio = m_mantidUI->createWorkspaceIndexDialog(
-      0, selectedMatrixWsNameList, showWaterfallOpt, showPlotAll, showTiledOpt);
+      0, selectedMatrixWsNameList, showWaterfallOpt, showPlotAll, showTiledOpt,
+      isAdvanced);
   dio->exec();
   return dio->getSelections();
 }
 
-/**
-* Allows users to choose spectra from the selected workspaces by presenting them
-* with a dialog box, and also allows choice of a log to plot against and a name
-* for this axis.
-* @param type :: [input] Type of plot (for dialog title)
-* @param nWorkspaces :: [input] Number of workspaces in selected group
-* @returns :: A structure listing the selected options
-*/
-MantidSurfacePlotDialog::UserInputSurface
-MantidTreeWidget::choosePlotOptions(const QString &type,
-                                    int nWorkspaces) const {
-  auto selectedMatrixWsList = getSelectedMatrixWorkspaces();
-  QList<QString> selectedMatrixWsNameList;
-  foreach (const auto matrixWs, selectedMatrixWsList) {
-    selectedMatrixWsNameList.append(
-        QString::fromStdString(matrixWs->getName()));
-  }
-  auto *dlg =
-      m_mantidUI->createSurfacePlotDialog(0, selectedMatrixWsNameList, type);
-  dlg->exec();
-  auto selections = dlg->getSelections();
-  std::stringstream err;
-
-  if (selections.accepted) {
-    if (selections.logName == MantidSurfacePlotDialog::CUSTOM) {
-      // Check number of values supplied
-      if (static_cast<int>(selections.customLogValues.size()) != nWorkspaces) {
-        err << "Number of custom log values must be equal to "
-               "number of workspaces in group";
-        selections.accepted = false;
-      }
-    }
-  }
-
-  auto errors = err.str();
-  if (!errors.empty()) {
-    MantidSurfacePlotDialog::showPlotOptionsError(errors.c_str());
-  }
-  return selections;
-}
-
-/**
-* Allows users to choose spectra from the selected workspaces by presenting them
-* with a dialog box, and also allows choice of a log to plot against and a name
-* for this axis.
-* @param nWorkspaces :: [input] Number of workspaces in selected group
-* @returns :: A structure listing the selected options
-*/
-MantidSurfacePlotDialog::UserInputSurface
-MantidTreeWidget::chooseSurfacePlotOptions(int nWorkspaces) const {
-  return choosePlotOptions("Surface", nWorkspaces);
-}
-
-/**
-* Allows users to choose spectra from the selected workspaces by presenting them
-* with a dialog box, and also allows choice of a log to plot against and a name
-* for this axis.
-* @param nWorkspaces :: [input] Number of workspaces in selected group
-* @returns :: A structure listing the selected options
-*/
-MantidSurfacePlotDialog::UserInputSurface
-MantidTreeWidget::chooseContourPlotOptions(int nWorkspaces) const {
-  return choosePlotOptions("Contour", nWorkspaces);
-}
-
 void MantidTreeWidget::setSortScheme(MantidItemSortScheme sortScheme) {
   m_sortScheme = sortScheme;
 }
diff --git a/MantidQt/MantidWidgets/src/MantidWSIndexDialog.cpp b/MantidQt/MantidWidgets/src/MantidWSIndexDialog.cpp
index 773488d66b4a99b78c679031a8ca1e428a111070..c018c5876090e21248026aec0e1c3698e44e5df6 100644
--- a/MantidQt/MantidWidgets/src/MantidWSIndexDialog.cpp
+++ b/MantidQt/MantidWidgets/src/MantidWSIndexDialog.cpp
@@ -1,19 +1,34 @@
 #include "MantidQtMantidWidgets/MantidWSIndexDialog.h"
 #include "MantidAPI/AnalysisDataService.h"
 #include "MantidAPI/Axis.h"
-#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidAPI/WorkspaceGroup.h"
+#include "MantidAPI/Run.h"
 #include "MantidAPI/SpectraDetectorTypes.h"
 
 #include <QPalette>
 #include <QPushButton>
 #include <QRegExp>
 #include <QtAlgorithms>
+#include <QMessageBox>
 #include <boost/lexical_cast.hpp>
 #include <exception>
 #include <stdlib.h>
 
 namespace MantidQt {
 namespace MantidWidgets {
+/// The string "Workspace index"
+const QString MantidWSIndexWidget::WORKSPACE_NAME = "Workspace name";
+const QString MantidWSIndexWidget::WORKSPACE_INDEX = "Workspace index";
+
+/// The string "Custom"
+const QString MantidWSIndexWidget::CUSTOM = "Custom";
+
+// Strings for plot types
+const QString MantidWSIndexWidget::SIMPLE_PLOT = "1D Plot";
+const QString MantidWSIndexWidget::WATERFALL_PLOT = "Waterfall Plot";
+const QString MantidWSIndexWidget::SURFACE_PLOT = "Surface Plot";
+const QString MantidWSIndexWidget::CONTOUR_PLOT = "Contour Plot";
+
 //----------------------------------
 // MantidWSIndexWidget methods
 //----------------------------------
@@ -24,15 +39,18 @@ namespace MantidWidgets {
  * @param wsNames :: the names of the workspaces to be plotted
  * @param showWaterfallOption :: true if waterfall plot enabled
  * @param showTiledOption :: true if tiled plot enabled
+ * @param isAdvanced :: true if advanced plotting has been selected
  */
 MantidWSIndexWidget::MantidWSIndexWidget(QWidget *parent, Qt::WFlags flags,
-                                         QList<QString> wsNames,
+                                         const QList<QString> &wsNames,
                                          const bool showWaterfallOption,
-                                         const bool showTiledOption)
+                                         const bool showTiledOption,
+                                         const bool isAdvanced)
     : QWidget(parent, flags), m_spectra(false),
       m_waterfall(showWaterfallOption), m_tiled(showTiledOption),
-      m_plotOptions(), m_wsNames(wsNames), m_wsIndexIntervals(),
-      m_spectraNumIntervals(), m_wsIndexChoice(), m_spectraIdChoice() {
+      m_advanced(isAdvanced), m_plotOptions(), m_wsNames(wsNames),
+      m_wsIndexIntervals(), m_spectraNumIntervals(), m_wsIndexChoice(),
+      m_spectraNumChoice() {
   checkForSpectraAxes();
   // Generate the intervals allowed to be plotted by the user.
   generateWsIndexIntervals();
@@ -46,14 +64,124 @@ MantidWSIndexWidget::MantidWSIndexWidget(QWidget *parent, Qt::WFlags flags,
  * Returns the user-selected options
  * @returns Struct containing user options
  */
-MantidWSIndexWidget::UserInput MantidWSIndexWidget::getSelections() const {
+MantidWSIndexWidget::UserInput MantidWSIndexWidget::getSelections() {
   UserInput options;
   options.plots = getPlots();
+  options.simple = is1DPlotSelected();
   options.waterfall = isWaterfallPlotSelected();
   options.tiled = isTiledPlotSelected();
+  if (m_advanced) {
+    options.surface = isSurfacePlotSelected();
+    options.errors = isErrorBarsSelected();
+    options.contour = isContourPlotSelected();
+  } else {
+    options.surface = false;
+    options.errors = false;
+    options.contour = false;
+  }
+
+  // Advanced options
+  if (m_advanced && (options.simple || options.waterfall || options.surface ||
+                     options.contour)) {
+    UserInputAdvanced userInputAdvanced;
+    if (options.surface || options.contour) {
+      userInputAdvanced.accepted = true;
+      userInputAdvanced.plotIndex = getPlotIndex();
+      userInputAdvanced.axisName = getAxisName();
+    }
+    userInputAdvanced.logName = getLogName();
+    if (userInputAdvanced.logName == WORKSPACE_NAME ||
+        userInputAdvanced.logName == WORKSPACE_INDEX) {
+      // We want default names in the legend if the log is workspace name or index
+      userInputAdvanced.logName = "";
+    }
+    userInputAdvanced.workspaceNames = m_wsNames;
+    if (userInputAdvanced.logName == CUSTOM) {
+      userInputAdvanced.customLogValues = getCustomLogValues();
+      if (userInputAdvanced.customLogValues.empty()) {
+        userInputAdvanced.accepted = false;
+      }
+    }
+    options.isAdvanced = true;
+    options.advanced = userInputAdvanced;
+  } else {
+    options.isAdvanced =
+        false; // We don't want the view to look at options.advanced.
+  }
   return options;
 }
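+// Example: with "Surface Plot" and the "Custom" log chosen, getSelections()
+// fills options.advanced (plot index, axis name, log name, custom log values)
+// and sets options.isAdvanced; for a non-advanced dialog isAdvanced is false
+// and the view ignores options.advanced.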
 
+/**
+* Returns the workspace index to be plotted
+* @returns Workspace index to be plotted
+*/
+int MantidWSIndexWidget::getPlotIndex() const {
+  int spectrumIndex = 0; // default to 0
+  const auto userInput = getPlots();
+
+  if (!userInput.empty()) {
+    const auto indexList = userInput.values();
+    if (!indexList.empty()) {
+      const auto spectrumIndexes = indexList.at(0);
+      if (!spectrumIndexes.empty()) {
+        spectrumIndex = *spectrumIndexes.begin();
+      }
+    }
+  }
+  return spectrumIndex;
+}
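+// Example: if getPlots() holds {"ws1" -> {3, 5}}, getPlotIndex() returns 3,
+// i.e. a single representative index; callers that need every selected index
+// should use getPlots() directly.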
+
+/**
+* Displays a message box with the supplied error string.
+* @param message :: [input] Error message to display
+*/
+void MantidWSIndexWidget::showPlotOptionsError(const QString &message) {
+  if (!message.isEmpty()) {
+    QMessageBox errorMessage;
+    errorMessage.setText(message);
+    errorMessage.setIcon(QMessageBox::Critical);
+    errorMessage.exec();
+  }
+}
+
+/**
+* If "Custom" is selected as log, returns the list of values the user has input
+* into the edit box, otherwise returns an empty set.
+* Note that the set is ordered by definition, and values are only added if they
+* are successfully converted to a double.
+* @returns Set of numerical log values
+*/
+const std::set<double> MantidWSIndexWidget::getCustomLogValues() const {
+  std::set<double> logValues;
+  if (m_logSelector->currentText() == CUSTOM) {
+    QStringList values = m_logValues->lineEdit()->text().split(',');
+    foreach (QString value, values) {
+      bool ok = false;
+      double number = value.toDouble(&ok);
+      if (ok) {
+        logValues.insert(number);
+      }
+    }
+  }
+  return logValues;
+}
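+// Example: the custom input "1.5, 3, 2, abc" yields the ordered set
+// {1.5, 2, 3}; the non-numeric token is skipped here and is instead reported
+// to the user by validatePlotOptions().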
+
+/**
+* Gets the name that the user gave for the Y axis of the surface plot
+* @returns Name input by user for axis
+*/
+const QString MantidWSIndexWidget::getAxisName() const {
+  return m_axisNameEdit->lineEdit()->text();
+}
+
+/**
+* Gets the log that the user selected to plot against
+* @returns Name of log, "Workspace name" or "Workspace index"
+*/
+const QString MantidWSIndexWidget::getLogName() const {
+  return m_logSelector->currentText();
+}
+
 /**
  * Returns the user-selected plots
  * @returns Plots selected by user
@@ -71,7 +199,7 @@ QMultiMap<QString, std::set<int>> MantidWSIndexWidget::getPlots() const {
     }
   }
   // Else if the user typed in the spectraField ...
-  else if (m_spectraIdChoice.getList().size() > 0) {
+  else if (m_spectraNumChoice.getList().size() > 0) {
     for (int i = 0; i < m_wsNames.size(); i++) {
       // Convert the spectra choices of the user into workspace indices for us
       // to use.
@@ -85,7 +213,7 @@ QMultiMap<QString, std::set<int>> MantidWSIndexWidget::getPlots() const {
       const Mantid::spec2index_map spec2index =
           ws->getSpectrumToWorkspaceIndexMap();
 
-      std::set<int> origSet = m_spectraIdChoice.getIntSet();
+      std::set<int> origSet = m_spectraNumChoice.getIntSet();
       std::set<int>::iterator it = origSet.begin();
       std::set<int> convertedSet;
 
@@ -103,29 +231,59 @@ QMultiMap<QString, std::set<int>> MantidWSIndexWidget::getPlots() const {
 }
 
 /**
- * Whether the user checked the "waterfall" box
+* Whether the user selected "1D plot"
+* @returns True if 1D plot selected
+*/
+bool MantidWSIndexWidget::is1DPlotSelected() const {
+  return (m_plotOptions->currentText() == SIMPLE_PLOT);
+}
+
+/**
+ * Whether the user selected "waterfall"
  * @returns True if waterfall plot selected
  */
 bool MantidWSIndexWidget::isWaterfallPlotSelected() const {
-  return (m_plotOptions->currentText() == "Waterfall Plot");
+  return (m_plotOptions->currentText() == WATERFALL_PLOT);
 }
 
 /**
- * Whether the user checked the "tiled" box
+ * Whether the user selected "tiled"
  * @returns True if tiled plot selected
  */
 bool MantidWSIndexWidget::isTiledPlotSelected() const {
   return (m_plotOptions->currentText() == "Tiled Plot");
 }
 
+/**
+* Whether the user selected surface plot
+* @returns True if surface plot selected
+*/
+bool MantidWSIndexWidget::isSurfacePlotSelected() const {
+  return (m_plotOptions->currentText() == SURFACE_PLOT);
+}
+
+/**
+* Whether the user selected contour plot
+* @returns True if contour plot selected
+*/
+bool MantidWSIndexWidget::isContourPlotSelected() const {
+  return (m_plotOptions->currentText() == CONTOUR_PLOT);
+}
+
+/**
+* Whether the user has selected a plot with error bars
+* @returns True if error bars are selected
+*/
+bool MantidWSIndexWidget::isErrorBarsSelected() const {
+  return m_showErrorBars->checkState();
+}
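+// Note: QCheckBox::checkState() returns Qt::CheckState, which converts to true
+// for any non-zero state (checked or partially checked); isChecked() would
+// arguably express the intent more clearly.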
+
 /**
  * Called when user edits workspace field
  */
 void MantidWSIndexWidget::editedWsField() {
-  if (usingSpectraNumbers()) {
-    m_spectraField->lineEdit()->clear();
-    m_spectraField->setError("");
-  }
+  m_spectraField->lineEdit()->clear();
+  m_spectraField->setError("");
 }
 
 /**
@@ -144,34 +302,138 @@ bool MantidWSIndexWidget::plotRequested() {
   bool acceptable = false;
   int npos = 0;
   QString wsText = m_wsField->lineEdit()->text();
-  QString spectraTest = m_spectraField->lineEdit()->text();
+  QString spectraText = m_spectraField->lineEdit()->text();
   QValidator::State wsState =
       m_wsField->lineEdit()->validator()->validate(wsText, npos);
   QValidator::State spectraState =
-      m_spectraField->lineEdit()->validator()->validate(spectraTest, npos);
+      m_spectraField->lineEdit()->validator()->validate(spectraText, npos);
   if (wsState == QValidator::Acceptable) {
     m_wsIndexChoice.addIntervals(m_wsField->lineEdit()->text());
+    m_usingWsIndexChoice = true;
+    m_usingSprectraNumChoice = false;
     acceptable = true;
   }
   // Else if the user typed in the spectraField ...
   else if (spectraState == QValidator::Acceptable) {
-    m_spectraIdChoice.addIntervals(m_spectraField->lineEdit()->text());
+    m_spectraNumChoice.addIntervals(m_spectraField->lineEdit()->text());
+    m_usingSprectraNumChoice = true;
+    m_usingWsIndexChoice = false;
     acceptable = true;
   } else {
+    m_usingSprectraNumChoice = false;
+    m_usingWsIndexChoice = false;
     QString error_message("Invalid input. It is not in the range available");
     if (!wsText.isEmpty())
       m_wsField->setError(error_message);
-    if (!spectraTest.isEmpty())
+    if (!spectraText.isEmpty())
       m_spectraField->setError(error_message);
+    if (wsText.isEmpty() && spectraText.isEmpty()) {
+      m_wsField->setError("Workspace indices or spectra numbers are needed");
+      m_spectraField->setError(
+          "Spectra numbers or workspace indices are needed");
+    }
   }
-  return acceptable;
+  // To give maximum feedback to user, we validate plot options,
+  // even if intervals are not acceptable
+  return validatePlotOptions() && acceptable;
 }
 
 /**
  * Called when dialog requests to plot all
  */
-void MantidWSIndexWidget::plotAllRequested() {
+bool MantidWSIndexWidget::plotAllRequested() {
   m_wsIndexChoice = m_wsIndexIntervals;
+  m_usingWsIndexChoice = true;
+  m_usingSprectraNumChoice = false;
+  return validatePlotOptions();
+}
+
+/**
+ * Validate plot options when a plot is requested
+ * and set an appropriate error if they are not valid
+ * @returns True if the plot options are valid
+ */
+bool MantidWSIndexWidget::validatePlotOptions() {
+
+  // Only bother if plotting is advanced
+  if (!m_advanced)
+    return true;
+
+  bool validOptions = true;
+
+  // We only validate the custom log values and
+  // only if custom logs are selected, else it's OK.
+  if (m_logSelector->currentText() == CUSTOM) {
+    QStringList values = m_logValues->lineEdit()->text().split(',');
+    bool firstValue = true;
+    double previousValue = 0.0;
+    foreach (QString value, values) {
+      bool ok = false;
+      double currentValue = value.toDouble(&ok);
+      // Check for non-numeric value
+      if (!ok) {
+        m_logValues->setError("A custom log value is not valid: " + value);
+        validOptions = false;
+        break;
+      }
+      // Check for order
+      if (firstValue) {
+        firstValue = false;
+        previousValue = currentValue;
+      } else {
+        if (previousValue < currentValue) {
+          previousValue = currentValue;
+        } else {
+          m_logValues->setError(
+              "The custom log values must be in numerical order and distinct.");
+          validOptions = false;
+          break;
+        }
+      }
+    }
+
+    if (validOptions) {
+      int numCustomLogValues = values.size();
+      QString nCustomLogValues;
+      nCustomLogValues.setNum(numCustomLogValues);
+      int numWorkspaces = m_wsNames.size();
+      if (m_plotOptions->currentText() == SURFACE_PLOT ||
+          m_plotOptions->currentText() == CONTOUR_PLOT) {
+        QString nWorkspaces;
+        nWorkspaces.setNum(numWorkspaces);
+
+        if (numCustomLogValues != numWorkspaces) {
+          m_logValues->setError("The number of custom log values (" +
+                                nCustomLogValues +
+                                ") is not equal to the number of workspaces (" +
+                                nWorkspaces + ").");
+          validOptions = false;
+        }
+      } else {
+        int numSpectra = 0;
+        if (m_usingWsIndexChoice)
+          numSpectra = m_wsIndexChoice.totalIntervalLength();
+        if (m_usingSprectraNumChoice)
+          numSpectra = m_spectraNumChoice.totalIntervalLength();
+        QString nPlots;
+        nPlots.setNum(numWorkspaces * numSpectra);
+
+        if (numCustomLogValues != numWorkspaces * numSpectra) {
+          m_logValues->setError(
+              "The number of custom log values (" + nCustomLogValues +
+              ") is not equal to the number of plots (" + nPlots + ").");
+          validOptions = false;
+        }
+      }
+    }
+  }
+
+  if (!validOptions) {
+    // Clear record of user choices, because they may change.
+    m_wsIndexChoice.clear();
+    m_spectraNumChoice.clear();
+  }
+
+  return validOptions;
 }
 
 /**
@@ -182,6 +444,9 @@ void MantidWSIndexWidget::init() {
   initSpectraBox();
   initWorkspaceBox();
   initOptionsBoxes();
+  if (m_advanced) {
+    initLogs();
+  }
   setLayout(m_outer);
 }
 
@@ -241,23 +506,176 @@ void MantidWSIndexWidget::initSpectraBox() {
  * Set up Options boxes UI
  */
 void MantidWSIndexWidget::initOptionsBoxes() {
-  m_optionsBox = new QHBoxLayout;
+  m_optionsBox = new QVBoxLayout;
 
+  m_plotOptionLabel = new QLabel(tr("Plot Type:"));
   if (m_waterfall || m_tiled) {
     m_plotOptions = new QComboBox();
-    m_plotOptions->addItem(tr("1D Plot"));
+    m_plotOptions->addItem(SIMPLE_PLOT);
     if (m_waterfall) {
-      m_plotOptions->addItem(tr("Waterfall Plot"));
+      m_plotOptions->addItem(WATERFALL_PLOT);
     }
     if (m_tiled) {
       m_plotOptions->addItem(tr("Tiled Plot"));
     }
+    if (m_advanced && isSuitableForContourOrSurfacePlot()) {
+      m_plotOptions->addItem(SURFACE_PLOT);
+      m_plotOptions->addItem(CONTOUR_PLOT);
+      connect(m_plotOptions, SIGNAL(currentIndexChanged(const QString &)), this,
+              SLOT(onPlotOptionChanged(const QString &)));
+    }
+    m_optionsBox->addWidget(m_plotOptionLabel);
     m_optionsBox->addWidget(m_plotOptions);
   }
 
+  if (m_advanced) {
+    int spacingAboveShowErrorBars = 10;
+    m_optionsBox->addSpacing(spacingAboveShowErrorBars);
+    m_showErrorBars = new QCheckBox("Show Error Bars");
+    m_optionsBox->addWidget(m_showErrorBars);
+  }
+
   m_outer->addItem(m_optionsBox);
 }
 
+void MantidWSIndexWidget::initLogs() {
+  m_logOptionsGroup = new QGroupBox(tr("Log Options"));
+  m_logBox = new QVBoxLayout;
+
+  m_logLabel = new QLabel(tr("Log value to plot against:"));
+  m_logSelector = new QComboBox();
+  populateLogComboBox();
+
+  m_customLogLabel = new QLabel(tr("<br>Custom log values:"));
+  m_logValues = new QLineEditWithErrorMark();
+
+  m_axisLabel = new QLabel(tr("<br>Label for plot axis:"));
+  m_axisNameEdit = new QLineEditWithErrorMark();
+  m_axisNameEdit->lineEdit()->setText(m_logSelector->currentText());
+
+  m_logBox->addWidget(m_logLabel);
+  m_logBox->addWidget(m_logSelector);
+  m_logBox->addWidget(m_customLogLabel);
+  m_logBox->addWidget(m_logValues);
+  m_logBox->addWidget(m_axisLabel);
+  m_logBox->addWidget(m_axisNameEdit);
+
+  m_logSelector->setEnabled(true);
+  m_logValues->setEnabled(false);
+  m_axisNameEdit->setEnabled(false);
+
+  m_logOptionsGroup->setLayout(m_logBox);
+
+  m_outer->addWidget(m_logOptionsGroup);
+
+  connect(m_logSelector, SIGNAL(currentIndexChanged(const QString &)), this,
+          SLOT(onLogSelected(const QString &)));
+}
+
+/**
+* Called when log selection changed
+* If "Custom" selected, enable the custom log input box.
+* Otherwise, it is read-only.
+* Also put the log name into the axis name box as a default choice.
+* @param logName :: [input] Text selected in combo box
+*/
+void MantidWSIndexWidget::onLogSelected(const QString &logName) {
+  m_logValues->setEnabled(logName == CUSTOM);
+  m_logValues->lineEdit()->clear();
+  m_axisNameEdit->lineEdit()->setText(logName);
+}
+
+/**
+* Called when plot option is changed
+* @param plotOption :: [input] New plot option
+*/
+void MantidWSIndexWidget::onPlotOptionChanged(const QString &plotOption) {
+  auto useLogNames = m_advanced && isSuitableForLogValues(plotOption);
+  auto isLogSelectorCustom = m_logSelector->currentText() == CUSTOM;
+  auto isSurfaceOrContourPlot = m_plotOptions->currentText() == SURFACE_PLOT ||
+                                m_plotOptions->currentText() == CONTOUR_PLOT;
+  // Enable widgets as appropriate
+  m_showErrorBars->setEnabled(!isSurfaceOrContourPlot);
+  m_logSelector->setEnabled(useLogNames);
+  m_logValues->setEnabled(useLogNames && isLogSelectorCustom);
+  m_axisNameEdit->setEnabled(isSurfaceOrContourPlot);
+  if (useLogNames) {
+    // Make sure an appropriate name is shown for the default log option.
+    if (m_plotOptions->currentText() == SURFACE_PLOT ||
+        m_plotOptions->currentText() == CONTOUR_PLOT) {
+      m_logSelector->setItemText(0, WORKSPACE_INDEX);
+      if (m_axisNameEdit->lineEdit()->text() == WORKSPACE_NAME) {
+        m_axisNameEdit->lineEdit()->setText(WORKSPACE_INDEX);
+      }
+    } else {
+      m_logSelector->setItemText(0, WORKSPACE_NAME);
+    }
+  }
+}
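+// Example: choosing "Surface Plot" disables the error-bar checkbox, enables
+// the axis-name box and relabels the default log entry as "Workspace index";
+// choosing "1D Plot" again reverses this and restores "Workspace name".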
+
+/**
+* Populate the log combo box with all log names that
+* have a single numeric value per workspace (and occur
+* in every workspace)
+*/
+void MantidWSIndexWidget::populateLogComboBox() {
+  // First item should be "Workspace name"
+  m_logSelector->addItem(WORKSPACE_NAME);
+
+  // Create a table of all single-value numeric log names versus
+  // how many workspaces they appear in
+  std::map<std::string, int> logCounts;
+  for (auto &wsName : m_wsNames) {
+    auto ws = getWorkspace(wsName);
+    if (ws) {
+      const std::vector<Mantid::Kernel::Property *> &logData =
+          ws->run().getLogData();
+      for (auto &log : logData) {
+        // If this is a single-value numeric log, add it to the list of counts
+        if (dynamic_cast<Mantid::Kernel::PropertyWithValue<int> *>(log) ||
+            dynamic_cast<Mantid::Kernel::PropertyWithValue<double> *>(log)) {
+          const std::string name = log->name();
+          if (logCounts.find(name) != logCounts.end()) {
+            logCounts[name]++;
+          } else {
+            logCounts[name] = 1;
+          }
+        }
+      }
+    }
+  }
+
+  // Add the log names to the combo box if they appear in all workspaces
+  const int nWorkspaces = m_wsNames.size();
+  for (auto &logCount : logCounts) {
+    if (logCount.second == nWorkspaces) {
+      m_logSelector->addItem(logCount.first.c_str());
+    }
+  }
+
+  // Add "Custom" at the end of the list
+  m_logSelector->addItem(CUSTOM);
+}
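+// Example: a single-valued numeric log (say "temperature") that is present in
+// every selected workspace is added to the combo box; logs missing from any
+// workspace, or logs that are not single numeric values, are left out.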
+
+Mantid::API::MatrixWorkspace_const_sptr
+MantidWSIndexWidget::getWorkspace(const QString &workspaceName) const {
+  return boost::dynamic_pointer_cast<const Mantid::API::MatrixWorkspace>(
+      Mantid::API::AnalysisDataService::Instance().retrieve(
+          workspaceName.toStdString()));
+}
+
+// True if the current selection is suitable for a contour or surface plot
+// (i.e. more than two workspaces are selected)
+bool MantidWSIndexWidget::isSuitableForContourOrSurfacePlot() const {
+  return (m_wsNames.size() > 2);
+}
+
+// True if the selected plot type supports plotting against log values
+bool MantidWSIndexWidget::isSuitableForLogValues(
+    const QString &plotOption) const {
+  return (plotOption == SIMPLE_PLOT || plotOption == WATERFALL_PLOT ||
+          plotOption == SURFACE_PLOT || plotOption == CONTOUR_PLOT);
+}
+
 /**
 * Check to see if *all* workspaces have a spectrum axis.
 * If even one does not have a spectra axis, then we wont
@@ -367,25 +785,28 @@ bool MantidWSIndexWidget::usingSpectraNumbers() const {
  * @param wsNames :: the names of the workspaces to be plotted
  * @param showWaterfallOption :: If true the waterfall checkbox is created
  * @param showPlotAll :: If true the "Plot all" button is created
- * @param showTiledOption :: If true the "Tiled" checkbox is created
+ * @param showTiledOption :: If true the "Tiled" option is created
+ * @param isAdvanced :: true if the advanced plotting dialog is created
  */
 MantidWSIndexDialog::MantidWSIndexDialog(QWidget *parent, Qt::WFlags flags,
-                                         QList<QString> wsNames,
+                                         const QList<QString> &wsNames,
                                          const bool showWaterfallOption,
                                          const bool showPlotAll,
-                                         const bool showTiledOption)
+                                         const bool showTiledOption,
+                                         const bool isAdvanced)
     : QDialog(parent, flags),
-      m_widget(this, flags, wsNames, showWaterfallOption, showTiledOption),
+      m_widget(this, flags, wsNames, showWaterfallOption, showTiledOption,
+               isAdvanced),
       m_plotAll(showPlotAll) {
   // Set up UI.
-  init();
+  init(isAdvanced);
 }
 
 /**
  * Returns the user-selected options
  * @returns Struct containing user options
  */
-MantidWSIndexWidget::UserInput MantidWSIndexDialog::getSelections() const {
+MantidWSIndexWidget::UserInput MantidWSIndexDialog::getSelections() {
   return m_widget.getSelections();
 }
 
@@ -399,7 +820,15 @@ QMultiMap<QString, std::set<int>> MantidWSIndexDialog::getPlots() const {
 }
 
 /**
- * Whether the user checked the "waterfall" box
+* Whether the user selected the simple 1D plot
+* @returns True if 1D plot selected
+*/
+bool MantidWSIndexDialog::is1DPlotSelected() const {
+  return m_widget.is1DPlotSelected();
+}
+
+/**
+ * Whether the user selected the "waterfall" plot
  * @returns True if waterfall plot selected
  */
 bool MantidWSIndexDialog::isWaterfallPlotSelected() const {
@@ -407,13 +836,37 @@ bool MantidWSIndexDialog::isWaterfallPlotSelected() const {
 }
 
 /**
- * Whether the user checked the "tiled" box
+ * Whether the user selected the "tiled" plot
  * @returns True if tiled plot selected
  */
 bool MantidWSIndexDialog::isTiledPlotSelected() const {
   return m_widget.isTiledPlotSelected();
 }
 
+/**
+* Whether the user selected the surface plot
+* @returns True if surface plot selected
+*/
+bool MantidWSIndexDialog::isSurfacePlotSelected() const {
+  return m_widget.isSurfacePlotSelected();
+}
+
+/**
+* Whether the user selected the contour plot
+* @returns True if contour plot selected
+*/
+bool MantidWSIndexDialog::isContourPlotSelected() const {
+  return m_widget.isContourPlotSelected();
+}
+
+/**
+* Whether the user selected error bars
+* @returns True if error bars selected
+*/
+bool MantidWSIndexDialog::isErrorBarsSelected() const {
+  return m_widget.isErrorBarsSelected();
+}
+
 //----------------------------------
 // MantidWSIndexDialog private slots
 //----------------------------------
@@ -430,17 +883,22 @@ void MantidWSIndexDialog::plot() {
  * Called when "Plot all" button pressed
  */
 void MantidWSIndexDialog::plotAll() {
-  m_widget.plotAllRequested();
-  accept();
+  if (m_widget.plotAllRequested()) {
+    accept();
+  }
 }
 
 //----------------------------------
 // MantidWSIndexDialog private methods
 //----------------------------------
-void MantidWSIndexDialog::init() {
+void MantidWSIndexDialog::init(bool isAdvanced) {
   m_outer = new QVBoxLayout;
 
-  setWindowTitle(tr("MantidPlot"));
+  if (isAdvanced) {
+    setWindowTitle(tr("Plot Advanced"));
+  } else {
+    setWindowTitle(tr("Plot Spectrum"));
+  }
   m_outer->insertWidget(1, &m_widget);
   initButtons();
   setLayout(m_outer);
@@ -737,6 +1195,8 @@ void IntervalList::setIntervalList(const IntervalList &intervals) {
   m_list = QList<Interval>(intervals.getList());
 }
 
+void IntervalList::clear() { m_list = QList<Interval>(); }
+
 std::set<int> IntervalList::getIntSet() const {
   std::set<int> intSet;
 
diff --git a/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp b/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp
index 07fb31bb11291b3deae887f1d0bb1cebfdecadb8..4fa7b1025202367cacdc9fdd1bc34fc37c64e8f8 100644
--- a/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp
+++ b/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp
@@ -1,6 +1,9 @@
 #include "MantidQtMantidWidgets/MuonFitDataSelector.h"
 #include "MantidKernel/Logger.h"
 
+#include "qttreepropertybrowser.h"
+#include "qtpropertymanager.h"
+
 namespace {
 Mantid::Kernel::Logger g_log("MuonFitDataSelector");
 }
@@ -15,14 +18,8 @@ namespace MantidWidgets {
 MuonFitDataSelector::MuonFitDataSelector(QWidget *parent)
     : MantidWidget(parent) {
   m_ui.setupUi(this);
-  this->setUpValidators();
   this->setDefaultValues();
   this->setUpConnections();
-  m_groupBoxes.insert("fwd", m_ui.chkFwd);
-  m_groupBoxes.insert("bwd", m_ui.chkBwd);
-  m_periodBoxes.insert("1", m_ui.chk1);
-  m_periodBoxes.insert("2", m_ui.chk2);
-
   // Disable "Browse" button - use case is that first run will always be the one
   // selected on front tab. User will type in the runs they want rather than
   // using the Browse button. (If they want to "Browse" they can use front tab).
@@ -34,18 +31,22 @@ MuonFitDataSelector::MuonFitDataSelector(QWidget *parent)
  * @param parent :: [input] Parent dialog for the widget
  * @param runNumber :: [input] Run number of initial workspace
  * @param instName :: [input] Name of instrument from initial workspace
- * @param numPeriods :: [input] Number of periods from initial workspace
- * @param groups :: [input] Group names from initial workspace
- */
+*/
 MuonFitDataSelector::MuonFitDataSelector(QWidget *parent, int runNumber,
-                                         const QString &instName,
-                                         size_t numPeriods,
-                                         const QStringList &groups)
+                                         const QString &instName) /*
+  * numPeriods :: [input] Number of periods from initial workspace
+  * groups :: [input] Group names from initial workspace
+                                          size_t numPeriods,
+                                          const QStringList &groups)*/
     : MuonFitDataSelector(parent) {
   this->setWorkspaceDetails(QString::number(runNumber), instName,
                             boost::optional<QString>{});
-  this->setNumPeriods(numPeriods);
-  this->setAvailableGroups(groups);
+  // Not used in this case, but left here as a reminder
+  // for future changes that may need to assign them.
+
+  // this->setNumPeriods(numPeriods);
+  // this->setAvailableGroups(groups);
 }
 
 /**
@@ -56,18 +57,8 @@ void MuonFitDataSelector::setUpConnections() {
   connect(m_ui.runs, SIGNAL(filesFound()), this, SLOT(userChangedRuns()));
   connect(m_ui.rbCoAdd, SIGNAL(toggled(bool)), this,
           SLOT(fitTypeChanged(bool)));
-  connect(m_ui.txtStart, SIGNAL(editingFinished()), this,
-          SIGNAL(dataPropertiesChanged()));
-  connect(m_ui.txtEnd, SIGNAL(editingFinished()), this,
-          SIGNAL(dataPropertiesChanged()));
-  connect(m_ui.chkCombine, SIGNAL(stateChanged(int)), this,
-          SLOT(periodCombinationStateChanged(int)));
   connect(m_ui.txtSimFitLabel, SIGNAL(editingFinished()), this,
           SIGNAL(simulLabelChanged()));
-  connect(this, SIGNAL(selectedGroupsChanged()), this,
-          SLOT(checkForMultiGroupPeriodSelection()));
-  connect(this, SIGNAL(selectedPeriodsChanged()), this,
-          SLOT(checkForMultiGroupPeriodSelection()));
   connect(this, SIGNAL(workspaceChanged()), this,
           SLOT(checkForMultiGroupPeriodSelection()));
   connect(m_ui.cbDataset, SIGNAL(currentIndexChanged(int)), this,
@@ -75,10 +66,6 @@ void MuonFitDataSelector::setUpConnections() {
   connect(m_ui.btnNextDataset, SIGNAL(clicked()), this, SLOT(setNextDataset()));
   connect(m_ui.btnPrevDataset, SIGNAL(clicked()), this,
           SLOT(setPreviousDataset()));
-  connect(m_ui.txtFirst, SIGNAL(editingFinished()), this,
-          SIGNAL(selectedPeriodsChanged()));
-  connect(m_ui.txtSecond, SIGNAL(editingFinished()), this,
-          SIGNAL(selectedPeriodsChanged()));
 }
 
 /**
@@ -108,37 +95,13 @@ void MuonFitDataSelector::userChangedRuns() {
   emit workspaceChanged();
 }
 
-/**
- * Sets group names and updates checkboxes on UI
- * By default sets all unchecked
- * @param groups :: [input] List of group names
- */
-void MuonFitDataSelector::setAvailableGroups(const QStringList &groups) {
-  // If it's the same list, do nothing
-  if (groups.size() == m_groupBoxes.size()) {
-    auto existingGroups = m_groupBoxes.keys();
-    auto newGroups = groups;
-    qSort(existingGroups);
-    qSort(newGroups);
-    if (existingGroups == newGroups) {
-      return;
-    }
-  }
-
-  clearGroupCheckboxes();
-  for (const auto group : groups) {
-    addGroupCheckbox(group);
-  }
-}
-
 /**
  * Get the user's supplied start time (default 0)
  * @returns :: start time input by user in microseconds
  */
 double MuonFitDataSelector::getStartTime() const {
   // Validator ensures cast to double will succeed
-  const QString start = m_ui.txtStart->text();
-  return start.toDouble();
+  return m_startX;
 }
 
 /**
@@ -146,7 +109,7 @@ double MuonFitDataSelector::getStartTime() const {
  * @param start :: [input] Start time in microseconds
  */
 void MuonFitDataSelector::setStartTimeQuietly(double start) {
-  m_ui.txtStart->setText(QString::number(start));
+  m_startX = start;
 }
 
 /**
@@ -162,19 +125,13 @@ void MuonFitDataSelector::setStartTime(double start) {
  * Get the user's supplied end time (default 10)
  * @returns :: start time input by user in microseconds
  */
-double MuonFitDataSelector::getEndTime() const {
-  // Validator ensures cast to double will succeed
-  const QString end = m_ui.txtEnd->text();
-  return end.toDouble();
-}
+double MuonFitDataSelector::getEndTime() const { return m_endX; }
 
 /**
  * Set the end time in the UI WITHOUT sending signal
  * @param end :: [input] End time in microseconds
  */
-void MuonFitDataSelector::setEndTimeQuietly(double end) {
-  m_ui.txtEnd->setText(QString::number(end));
-}
+void MuonFitDataSelector::setEndTimeQuietly(double end) { m_endX = end; }
 
 /**
  * Set the end time in the UI, and send signal
@@ -193,16 +150,6 @@ QStringList MuonFitDataSelector::getFilenames() const {
   return m_ui.runs->getFilenames();
 }
 
-/**
- * Set up input validation on UI controls
- * e.g. some boxes should only accept numeric input
- */
-void MuonFitDataSelector::setUpValidators() {
-  // Start/end times: numeric values only
-  m_ui.txtStart->setValidator(new QDoubleValidator(this));
-  m_ui.txtEnd->setValidator(new QDoubleValidator(this));
-}
-
 /**
  * Set up run finder with initial run number and instrument
  * @param runNumbers :: [input] Run numbers from loaded workspace
@@ -243,127 +190,17 @@ void MuonFitDataSelector::setWorkspaceDetails(
  */
 void MuonFitDataSelector::setDefaultValues() {
   const QChar muMicro{0x03BC}; // mu in Unicode
-  m_ui.lblStart->setText(QString("Start (%1s)").arg(muMicro));
-  m_ui.lblEnd->setText(QString("End (%1s)").arg(muMicro));
   this->setStartTime(0.0);
   this->setEndTime(0.0);
-  setPeriodCombination(false);
   m_ui.txtSimFitLabel->setText("0");
   emit simulLabelChanged(); // make sure default "0" is set
 }
-
-/**
- * Set visibility of the "Periods" group box
- * (if single-period, hide to not confuse the user)
- * @param visible :: [input] Whether to show or hide the options
- */
-void MuonFitDataSelector::setPeriodVisibility(bool visible) {
-  m_ui.groupBoxPeriods->setVisible(visible);
-}
-
-/**
- * Add a new checkbox to the list of groups with given name
- * The new checkbox is unchecked by default
- * @param name :: [input] Name of group to add
- */
-void MuonFitDataSelector::addGroupCheckbox(const QString &name) {
-  auto checkBox = new QCheckBox(name);
-  m_groupBoxes.insert(name, checkBox);
-  checkBox->setChecked(false);
-  m_ui.verticalLayoutGroups->addWidget(checkBox);
-  connect(checkBox, SIGNAL(clicked(bool)), this,
-          SIGNAL(selectedGroupsChanged()));
-}
-
-/**
- * Clears all group names and checkboxes
- * (ready to add new ones)
- */
-void MuonFitDataSelector::clearGroupCheckboxes() {
-  for (const auto &checkbox : m_groupBoxes) {
-    m_ui.verticalLayoutGroups->removeWidget(checkbox);
-    checkbox->deleteLater(); // will disconnect signal automatically
-  }
-  m_groupBoxes.clear();
-}
-
-/**
- * Sets checkboxes on UI for given number
- * of periods plus "combination" boxes.
- * Hides control for single-period data.
- * @param numPeriods :: [input] Number of periods
- */
-void MuonFitDataSelector::setNumPeriods(size_t numPeriods) {
-  const size_t currentPeriods = static_cast<size_t>(m_periodBoxes.size());
-  if (numPeriods > currentPeriods) {
-    // create more boxes
-    for (size_t i = currentPeriods; i != numPeriods; i++) {
-      QString name = QString::number(i + 1);
-      auto checkbox = new QCheckBox(name);
-      m_periodBoxes.insert(name, checkbox);
-      m_ui.verticalLayoutPeriods->addWidget(checkbox);
-    }
-  } else if (numPeriods < currentPeriods) {
-    // delete the excess
-    QStringList toRemove;
-    for (const QString name : m_periodBoxes.keys()) {
-      const size_t periodNum = static_cast<size_t>(name.toInt());
-      if (periodNum > numPeriods) {
-        m_ui.verticalLayoutPeriods->removeWidget(m_periodBoxes.value(name));
-        m_periodBoxes.value(name)->deleteLater(); // will disconnect signal
-        toRemove.append(name);
-      }
-    }
-    for (const QString name : toRemove) {
-      m_periodBoxes.remove(name);
-    }
-  }
-
-  // Ensure signals connected
-  for (const auto &checkbox : m_periodBoxes) {
-    connect(checkbox, SIGNAL(clicked()), this,
-            SIGNAL(selectedPeriodsChanged()));
-  }
-
-  // Always put the combination at the bottom ("-1" = at end)
-  m_ui.verticalLayoutPeriods->removeItem(m_ui.horizontalLayoutPeriodsCombine);
-  m_ui.verticalLayoutPeriods->insertLayout(-1,
-                                           m_ui.horizontalLayoutPeriodsCombine);
-
-  // Hide box if single-period
-  this->setPeriodVisibility(numPeriods > 1);
-}
-
 /**
  * Returns a list of periods and combinations chosen in UI
  * @returns :: list of periods e.g. "1", "3", "1+2-3+4", or "" if single-period
  */
 QStringList MuonFitDataSelector::getPeriodSelections() const {
-  QStringList checked;
-  if (m_ui.groupBoxPeriods->isVisible()) {
-    for (auto iter = m_periodBoxes.constBegin();
-         iter != m_periodBoxes.constEnd(); ++iter) {
-      if (iter.value()->isChecked()) {
-        checked.append(iter.key());
-      }
-    }
-
-    // combination
-    if (m_ui.chkCombine->isChecked()) {
-      QString combination = m_ui.txtFirst->text();
-      const auto second = m_ui.txtSecond->text();
-      if (!second.isEmpty()) {
-        combination.append("-").append(m_ui.txtSecond->text());
-      }
-      combination.replace(" ", "");
-      combination.replace(",", "+");
-      checked.append(combination);
-    }
-  } else {
-    // Single-period data
-    checked << "";
-  }
-  return checked;
+  return m_chosenPeriods;
 }
 
 /**
@@ -371,89 +208,7 @@ QStringList MuonFitDataSelector::getPeriodSelections() const {
  * @returns :: list of selected groups
  */
 QStringList MuonFitDataSelector::getChosenGroups() const {
-  QStringList chosen;
-  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
-       ++iter) {
-    if (iter.value()->isChecked()) {
-      chosen.append(iter.key());
-    }
-  }
-  return chosen;
-}
-/**
-* Clears the list of selected groups (unchecks boxes)
-*/
-void MuonFitDataSelector::clearChosenGroups() const {
-  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
-       ++iter) {
-    iter.value()->setChecked(false);
-  }
-}
-/**
- * Set the chosen group ticked and all others off
- * Used when switching from Home tab to Data Analysis tab
- * @param group :: [input] Name of group to select
- */
-void MuonFitDataSelector::setChosenGroup(const QString &group) {
-  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
-       ++iter) {
-    if (iter.key() == group) {
-      iter.value()->setChecked(true);
-    }
-  }
-}
-
-/**
- * Set the chosen period/combination ticked and all others off
- * Used when switching from Home tab to Data Analysis tab
- * @param period :: [input] Period string to set selected
- * (can be just one period or a combination)
- */
-void MuonFitDataSelector::setChosenPeriod(const QString &period) {
-  // Begin by unchecking everything
-  for (auto checkbox : m_periodBoxes) {
-    checkbox->setChecked(false);
-  }
-
-  // If single-period or all periods, string will be empty
-  if (period.isEmpty()) {
-    if (m_periodBoxes.size() == 1) { // single-period
-      setPeriodCombination(false);
-      m_periodBoxes.begin().value()->setChecked(true);
-    } else { // all periods selected
-      setPeriodCombination(true);
-      QString combination;
-      for (int i = 0; i < m_periodBoxes.count() - 1; i++) {
-        combination.append(QString::number(i + 1)).append(", ");
-      }
-      m_ui.txtFirst->setText(
-          combination.append(QString::number(m_periodBoxes.count())));
-      m_ui.txtSecond->clear();
-    }
-  } else {
-    // Test if period can be cast to int (just one period) or if it's a
-    // combination e.g. "1+2"
-    bool onePeriod(false);
-    /*const int chosenPeriod = */ period.toInt(&onePeriod);
-    if (onePeriod) {
-      // set just one
-      for (auto iter = m_periodBoxes.constBegin();
-           iter != m_periodBoxes.constEnd(); ++iter) {
-        if (iter.key() == period) {
-          iter.value()->setChecked(true);
-        }
-      }
-      setPeriodCombination(false);
-    } else {
-      // set the combination
-      QStringList parts = period.split('-');
-      if (parts.size() == 2) {
-        m_ui.txtFirst->setText(parts[0].replace("+", ", "));
-        m_ui.txtSecond->setText(parts[1].replace("+", ", "));
-        setPeriodCombination(true);
-      }
-    }
-  }
+  return m_chosenGroups;
 }
 
 /**
@@ -513,10 +268,9 @@ IMuonFitDataSelector::FitType MuonFitDataSelector::getFitType() const {
   // If radio buttons disabled, it's a single fit unless multiple groups/periods
   // chosen
   if (!m_ui.rbCoAdd->isEnabled()) {
-    const auto groups = getChosenGroups();
-    const auto periods = getPeriodSelections();
-    return groups.size() <= 1 && periods.size() <= 1 ? FitType::Single
-                                                     : FitType::Simultaneous;
+    return m_chosenGroups.size() <= 1 && m_chosenPeriods.size() <= 1
+               ? FitType::Single
+               : FitType::Simultaneous;
   } else {
     // which button is selected
     if (m_ui.rbCoAdd->isChecked()) {
@@ -545,33 +299,6 @@ void MuonFitDataSelector::setFitType(IMuonFitDataSelector::FitType type) {
   }
   checkForMultiGroupPeriodSelection();
 }
-
-/**
- * Check/uncheck period combination checkbox and set the textboxes
- * enabled/disabled
- * @param on :: [input] Turn on or off
- */
-void MuonFitDataSelector::setPeriodCombination(bool on) {
-  m_ui.chkCombine->setChecked(on);
-  m_ui.txtFirst->setEnabled(on);
-  m_ui.txtSecond->setEnabled(on);
-}
-
-/**
- * Slot: Keeps enabled/disabled state of textboxes in sync with checkbox
- * for period combination choices
- * @param state :: [input] New check state of box
- */
-void MuonFitDataSelector::periodCombinationStateChanged(int state) {
-  m_ui.txtFirst->setEnabled(state == Qt::Checked);
-  m_ui.txtSecond->setEnabled(state == Qt::Checked);
-  // If no text is set in the boxes, put something in there
-  if (m_ui.txtFirst->text().isEmpty() && m_ui.txtSecond->text().isEmpty()) {
-    m_ui.txtFirst->setText("1");
-  }
-  emit selectedPeriodsChanged();
-}
-
 /**
  * Return the instrument name currently set as the override
  * for the data selector
@@ -601,11 +328,6 @@ void MuonFitDataSelector::unsetBusyState() {
   disconnect(m_ui.runs, SIGNAL(fileInspectionFinished()), this,
              SLOT(unsetBusyState()));
   this->setCursor(Qt::ArrowCursor);
-  m_ui.groupBoxDataSelector->setEnabled(true);
-  m_ui.groupBoxGroups->setEnabled(true);
-  if (m_ui.groupBoxPeriods->isVisible()) {
-    m_ui.groupBoxPeriods->setEnabled(true);
-  }
 }
 
 /**
@@ -616,11 +338,6 @@ void MuonFitDataSelector::setBusyState() {
   connect(m_ui.runs, SIGNAL(fileInspectionFinished()), this,
           SLOT(unsetBusyState()));
   this->setCursor(Qt::WaitCursor);
-  m_ui.groupBoxDataSelector->setEnabled(false);
-  m_ui.groupBoxGroups->setEnabled(false);
-  if (m_ui.groupBoxPeriods->isVisible()) {
-    m_ui.groupBoxPeriods->setEnabled(false);
-  }
 }
 
 /**
@@ -644,9 +361,8 @@ void MuonFitDataSelector::setSimultaneousFitLabel(const QString &label) {
  * Called when groups/periods selection changes.
  */
 void MuonFitDataSelector::checkForMultiGroupPeriodSelection() {
-  const auto groups = getChosenGroups();
-  const auto periods = getPeriodSelections();
-  m_ui.txtSimFitLabel->setEnabled(groups.size() > 1 || periods.size() > 1 ||
+  m_ui.txtSimFitLabel->setEnabled(m_chosenGroups.size() > 1 ||
+                                  m_chosenPeriods.size() > 1 ||
                                   getFitType() == FitType::Simultaneous);
 }
 
@@ -672,7 +388,6 @@ QString MuonFitDataSelector::getDatasetName() const {
  */
 void MuonFitDataSelector::setDatasetNames(const QStringList &datasetNames) {
   const auto selectedName = m_ui.cbDataset->currentText();
-
   // Turn off signals while names are updated
   m_ui.cbDataset->blockSignals(true);
   m_ui.cbDataset->clear();
diff --git a/MantidQt/MantidWidgets/src/MuonFitPropertyBrowser.cpp b/MantidQt/MantidWidgets/src/MuonFitPropertyBrowser.cpp
index 9d82982a708e8cc5582afbbcf7d2528ba058b974..4df544d78653e9a87de29f35170f365760fdfc83 100644
--- a/MantidQt/MantidWidgets/src/MuonFitPropertyBrowser.cpp
+++ b/MantidQt/MantidWidgets/src/MuonFitPropertyBrowser.cpp
@@ -9,6 +9,8 @@
 #include "MantidKernel/VectorHelper.h"
 #include "MantidQtMantidWidgets/StringEditorFactory.h"
 
+#include "MantidQtMantidWidgets/MuonFitDataSelector.h"
+
 // Suppress a warning coming out of code that isn't ours
 #if defined(__INTEL_COMPILER)
 #pragma warning disable 1125
@@ -43,13 +45,18 @@
 #include <QSettings>
 #include <QMessageBox>
 #include <QAction>
+#include <QFormLayout>
+
 #include <QLayout>
 #include <QSplitter>
-#include <QMap>
 #include <QLabel>
 #include <QPushButton>
+
 #include <QMenu>
 #include <QSignalMapper>
+
+#include <QCheckBox>
+
 namespace {
 Mantid::Kernel::Logger g_log("MuonFitPropertyBrowser");
 }
@@ -87,6 +94,15 @@ void MuonFitPropertyBrowser::init() {
   // Seperates the data and the settings into two seperate categories
   settingsGroup = m_groupManager->addProperty("Data");
 
+  QSettings multiFitSettings;
+  multiFitSettings.beginGroup("");
+
+  /* Create multi-fit settings group */
+  QtProperty *multiFitSettingsGroup(nullptr);
+
+  // Holds the data selection shown when multi-fitting mode is enabled
+  multiFitSettingsGroup = m_groupManager->addProperty("Data");
+
   // Have slightly different names as requested by the muon scientists.
   m_startX =
       addDoubleProperty(QString("Start (%1s)").arg(QChar(0x03BC))); //(mu);
@@ -129,6 +145,42 @@ void MuonFitPropertyBrowser::init() {
   settingsGroup->addSubProperty(m_startX);
   settingsGroup->addSubProperty(m_endX);
   settingsGroup->addSubProperty(m_normalization);
+
+  // Disable "Browse" button - use case is that first run will always be the one
+  // selected on front tab. User will type in the runs they want rather than
+  // using the Browse button. (If they want to "Browse" they can use front tab).
+
+  multiFitSettingsGroup->addSubProperty(m_startX);
+  multiFitSettingsGroup->addSubProperty(m_endX);
+  m_groupsToFit = m_enumManager->addProperty("Groups/Pairs to fit");
+  m_groupsToFitOptions << "All groups"
+                       << "All Pairs"
+                       << "Custom";
+  m_showGroupValue << "groups";
+  m_showGroup = m_enumManager->addProperty("Selected Groups");
+  m_enumManager->setEnumNames(m_groupsToFit, m_groupsToFitOptions);
+  multiFitSettingsGroup->addSubProperty(m_groupsToFit);
+  multiFitSettingsGroup->addSubProperty(m_showGroup);
+
+  m_enumManager->setEnumNames(m_showGroup, m_showGroupValue);
+  QString tmp = "fwd";
+  addGroupCheckbox(tmp);
+  tmp = "bwd";
+  addGroupCheckbox(tmp);
+  m_periodsToFit = m_enumManager->addProperty("Periods to fit");
+  m_periodsToFitOptions << "1"
+                        << "2"
+                        << "Custom";
+  m_showPeriodValue << "1";
+  m_showPeriods = m_enumManager->addProperty("Selected Periods");
+  m_enumManager->setEnumNames(m_periodsToFit, m_periodsToFitOptions);
+  multiFitSettingsGroup->addSubProperty(m_periodsToFit);
+  multiFitSettingsGroup->addSubProperty(m_showPeriods);
+  m_enumManager->setEnumNames(m_showPeriods, m_showPeriodValue);
+
+  connect(m_browser, SIGNAL(currentItemChanged(QtBrowserItem *)), this,
+          SLOT(currentItemChanged(QtBrowserItem *)));
+
   /* Create editors and assign them to the managers */
   createEditors(w);
 
@@ -136,11 +188,35 @@ void MuonFitPropertyBrowser::init() {
 
   m_functionsGroup = m_browser->addProperty(functionsGroup);
   m_settingsGroup = m_browser->addProperty(settingsGroup);
+  m_multiFitSettingsGroup = m_browser->addProperty(multiFitSettingsGroup);
+
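+  // "Reselect Data" buttons for reopening the group/period selection dialogs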
+  m_btnGroup = new QGroupBox(tr("Reselect Data"));
+  QHBoxLayout *btnLayout = new QHBoxLayout;
+  m_reselectGroupBtn = new QPushButton("Groups/Pairs");
+  m_reselectPeriodBtn = new QPushButton("Periods");
+  m_generateBtn = new QPushButton("Combine Periods");
+  m_groupWindow = new QDialog(this);
+  m_periodWindow = new QDialog(this);
+  m_comboWindow = new QDialog(this);
+
+  m_reselectGroupBtn->setEnabled(false);
+  m_reselectPeriodBtn->setEnabled(false);
+  connect(m_reselectGroupBtn, SIGNAL(released()), this,
+          SLOT(groupBtnPressed()));
+  connect(m_reselectPeriodBtn, SIGNAL(released()), this,
+          SLOT(periodBtnPressed()));
+  connect(m_generateBtn, SIGNAL(released()), this, SLOT(generateBtnPressed()));
+
+  btnLayout->addWidget(m_reselectGroupBtn);
+  btnLayout->addWidget(m_reselectPeriodBtn);
+  btnLayout->addWidget(m_generateBtn);
+
+  m_btnGroup->setLayout(btnLayout);
 
   // Don't show "Function" or "Data" sections as they have separate widgets
   m_browser->setItemVisible(m_functionsGroup, false);
   m_browser->setItemVisible(m_settingsGroup, false);
-
+  m_browser->setItemVisible(m_multiFitSettingsGroup, true);
   // Custom settings that are specific and asked for by the muon scientists.
   QtProperty *customSettingsGroup = m_groupManager->addProperty("Settings");
 
@@ -184,9 +260,11 @@ void MuonFitPropertyBrowser::init() {
     const int index = parentLayout->count() - 1;
     constexpr int stretchFactor = 10; // so these widgets get any extra space
     parentLayout->insertWidget(index, m_mainSplitter, stretchFactor);
+
     parentLayout->setSpacing(0);
     parentLayout->setMargin(0);
     parentLayout->setContentsMargins(0, 0, 0, 0);
+    parentLayout->insertWidget(index + 1, m_btnGroup);
   }
   // Update tooltips when function structure is (or might've been) changed in
   // any way
@@ -200,6 +278,12 @@ void MuonFitPropertyBrowser::executeFitMenu(const QString &item) {
     FitPropertyBrowser::executeFitMenu(item);
   }
 }
+// Create group/pair selection pop up
+void MuonFitPropertyBrowser::groupBtnPressed() { genGroupWindow(); }
+// Create period selection pop up
+void MuonFitPropertyBrowser::periodBtnPressed() { genPeriodWindow(); }
+// Create combination selection pop up
+void MuonFitPropertyBrowser::generateBtnPressed() { genCombinePeriodWindow(); }
 /**
 pulate the fit button.
 * This initialization includes:
@@ -225,7 +309,6 @@ void MuonFitPropertyBrowser::setFitEnabled(bool yes) {
   m_fitActionSeqFit->setEnabled(yes);
   m_fitActionTFAsymm->setEnabled(yes);
 }
-
 /**
 * Set the input workspace name
 */
@@ -240,7 +323,79 @@ void MuonFitPropertyBrowser::setWorkspaceName(const QString &wsName) {
   if (i >= 0)
     m_enumManager->setValue(m_workspace, i);
 }
-
+/** Called when a dropdown menu is changed
+* @param prop :: A pointer to the changed property
+*/
+void MuonFitPropertyBrowser::enumChanged(QtProperty *prop) {
+  if (!m_changeSlotsEnabled)
+    return;
+  if (prop == m_groupsToFit) {
+    int j = m_enumManager->value(m_groupsToFit);
+    std::string option = m_groupsToFitOptions[j].toStdString();
+
+    if (option == "All groups") {
+      setAllGroups();
+      m_reselectGroupBtn->setEnabled(false);
+    } else if (option == "All Pairs") {
+      setAllPairs();
+      m_reselectGroupBtn->setEnabled(false);
+    } else if (option == "Custom") {
+      m_reselectGroupBtn->setEnabled(true);
+      genGroupWindow();
+    }
+    updateGroupDisplay();
+
+  } else if (prop == m_periodsToFit) {
+    int j = m_enumManager->value(m_periodsToFit);
+    std::string option = m_periodsToFitOptions[j].toStdString();
+    if (option == "Custom") {
+      m_reselectPeriodBtn->setEnabled(true);
+      genPeriodWindow();
+    } else {
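+      // a fixed period was selected: tick only the matching box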
+      for (auto iter = m_periodBoxes.constBegin();
+           iter != m_periodBoxes.constEnd(); ++iter) {
+        if (option == iter.key().toStdString()) {
+          m_boolManager->setValue(iter.value(), true);
+        } else {
+          m_boolManager->setValue(iter.value(), false);
+        }
+        m_reselectPeriodBtn->setEnabled(false);
+      }
+    }
+    updatePeriodDisplay();
+  } else if (prop == m_workspace) {
+    // make sure the output is updated
+    FitPropertyBrowser::enumChanged(prop);
+    int j = m_enumManager->value(m_workspace);
+    std::string option = m_workspaceNames[j].toStdString();
+    setOutputName(option);
+  } else {
+    FitPropertyBrowser::enumChanged(prop);
+  }
+}
+/** Sets the display for
+* selected groups
+*/
+void MuonFitPropertyBrowser::updateGroupDisplay() {
+  m_showGroupValue.clear();
+  m_showGroupValue << getChosenGroups().join(",");
+  m_enumManager->setEnumNames(m_showGroup, m_showGroupValue);
+  m_multiFitSettingsGroup->property()->addSubProperty(m_showGroup);
+}
+/** Sets the display for
+* selected periods
+*/
+void MuonFitPropertyBrowser::updatePeriodDisplay() {
+  m_showPeriodValue.clear();
+  auto tmp = getChosenPeriods();
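+  // show combinations with '+' instead of ',' before joining for display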
+  tmp.replaceInStrings(QRegExp(","), "+");
+  m_showPeriodValue << tmp.join(",");
+  m_enumManager->setEnumNames(m_showPeriods, m_showPeriodValue);
+  if (m_periodsToFitOptions.size() > 1) {
+    m_multiFitSettingsGroup->property()->addSubProperty(m_showPeriods);
+  }
+}
 /** Called when a double property changed
  * @param prop :: A pointer to the property
  */
@@ -303,8 +458,31 @@ void MuonFitPropertyBrowser::boolChanged(QtProperty *prop) {
     const bool val = m_boolManager->value(prop);
     emit fitRawDataClicked(val);
   } else {
-    // defer to parent class
-    FitPropertyBrowser::boolChanged(prop);
+    // search map for group/pair change
+    bool done = false;
+    for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+         ++iter) {
+      if (iter.value() == prop) {
+        done = true;
+        updateGroupDisplay();
+        emit groupBoxClicked();
+      }
+    }
+    // search map for period change
+    if (done == false) {
+      for (auto iter = m_periodBoxes.constBegin();
+           iter != m_periodBoxes.constEnd(); ++iter) {
+        if (iter.value() == prop) {
+          done = true;
+          updatePeriodDisplay();
+          emit periodBoxClicked();
+        }
+      }
+    }
+    if (done == false) {
+      // defer to parent class
+      FitPropertyBrowser::boolChanged(prop);
+    }
   }
 }
 
@@ -522,7 +700,6 @@ void MuonFitPropertyBrowser::runFit() {
     alg->setProperty("WorkspaceIndex", workspaceIndex());
     alg->setProperty("StartX", startX());
     alg->setProperty("EndX", endX());
-    alg->setPropertyValue("Output", outputName());
     alg->setPropertyValue("Minimizer", minimizer());
     alg->setPropertyValue("CostFunction", costFunction());
 
@@ -541,7 +718,10 @@ void MuonFitPropertyBrowser::runFit() {
         alg->setProperty("StartX_" + suffix, startX());
         alg->setProperty("EndX_" + suffix, endX());
       }
+    } else {
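+      // single fit: derive the label and selection from the workspace name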
+      setSingleFitLabel(wsName);
     }
+    alg->setPropertyValue("Output", outputName());
 
     observeFinish(alg);
     alg->executeAsync();
@@ -742,11 +922,18 @@ std::string MuonFitPropertyBrowser::outputName() const {
 void MuonFitPropertyBrowser::setMultiFittingMode(bool enabled) {
   // First, clear whatever model is currently set
   this->clear();
-
+  // set default selection (all groups)
+  if (enabled) {
+    setAllGroups();
+  } else { // clear current selection
+    clearChosenGroups();
+    clearChosenPeriods();
+  }
   // Show or hide "Function" and "Data" sections
   m_browser->setItemVisible(m_functionsGroup, !enabled);
   m_browser->setItemVisible(m_settingsGroup, !enabled);
-
+  m_browser->setItemVisible(m_multiFitSettingsGroup, enabled);
+  m_btnGroup->setVisible(enabled);
   // Show or hide additional widgets
   for (int i = 0; i < m_widgetSplitter->count(); ++i) {
     if (auto *widget = m_widgetSplitter->widget(i)) {
@@ -796,6 +983,382 @@ bool MuonFitPropertyBrowser::hasGuess() const {
     return false;
   }
 }
+/**
+* Sets group names and updates checkboxes on UI
+* By default sets all unchecked
+* @param groups :: [input] List of group names
+*/
+void MuonFitPropertyBrowser::setAvailableGroups(const QStringList &groups) {
+
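+  // reset the "Groups/Pairs to fit" selection to the default ("All groups")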
+  m_enumManager->setValue(m_groupsToFit, 0);
+  // If it's the same list, do nothing
+  if (groups.size() == m_groupBoxes.size()) {
+    auto existingGroups = m_groupBoxes.keys();
+    auto newGroups = groups;
+    qSort(existingGroups);
+    qSort(newGroups);
+    if (existingGroups == newGroups) {
+      return;
+    }
+  }
+
+  clearGroupCheckboxes();
+  for (const auto &group : groups) {
+    addGroupCheckbox(group);
+  }
+}
+/**
+* Selects a single group/pair
+* @param group :: [input] Group/pair to select
+*/
+void MuonFitPropertyBrowser::setChosenGroup(QString &group) {
+  clearChosenGroups();
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    if (iter.key() == group) {
+      m_boolManager->setValue(iter.value(), true);
+    }
+  }
+}
+/**
+* Clears all group names and checkboxes
+* (ready to add new ones)
+*/
+void MuonFitPropertyBrowser::clearGroupCheckboxes() {
+  for (const auto &checkbox : m_groupBoxes) {
+    delete (checkbox);
+  }
+  m_groupBoxes.clear();
+}
+/**
+* Add a new checkbox to the list of groups with given name
+* The new checkbox is checked according to dropdown menu selection
+* @param name :: [input] Name of group to add
+*/
+void MuonFitPropertyBrowser::addGroupCheckbox(const QString &name) {
+  m_groupBoxes.insert(name, m_boolManager->addProperty(name));
+  int j = m_enumManager->value(m_groupsToFit);
+  auto option = m_groupsToFitOptions[j].toStdString();
+  if (option == "All groups") {
+    setAllGroups();
+  } else if (option == "All Pairs") {
+    setAllPairs();
+  }
+}
+/**
+* Returns a list of the selected groups (checked boxes)
+* @returns :: list of selected groups
+*/
+QStringList MuonFitPropertyBrowser::getChosenGroups() const {
+  QStringList chosen;
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    if (m_boolManager->value(iter.value()) == true) {
+      chosen.append(iter.key());
+    }
+  }
+  return chosen;
+}
+/**
+* Clears the list of selected groups (unchecks boxes)
+*/
+void MuonFitPropertyBrowser::clearChosenGroups() const {
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    m_boolManager->setValue(iter.value(), false);
+  }
+}
+
+/**
+* Selects all groups
+*/
+void MuonFitPropertyBrowser::setAllGroups() {
+
+  clearChosenGroups();
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    for (auto group : m_groupsList) {
+      if (iter.key().toStdString() == group) {
+        m_boolManager->setValue(iter.value(), true);
+      }
+    }
+  }
+}
+/*
+* Selects all pairs
+*/
+void MuonFitPropertyBrowser::setAllPairs() {
+  clearChosenGroups();
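+  // anything not named in m_groupsList is treated as a pair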
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    bool isItGroup = false;
+    for (auto group : m_groupsList) {
+      if (iter.key().toStdString() == group) {
+        isItGroup = true;
+      }
+    }
+    if (!isItGroup) {
+      m_boolManager->setValue(iter.value(), true);
+    }
+  }
+}
+
+/*
+* Create a popup window to select a
+* custom set of groups/pairs
+*/
+void MuonFitPropertyBrowser::genGroupWindow() {
+
+  QtGroupPropertyManager *groupManager =
+      new QtGroupPropertyManager(m_groupWindow);
+  QVBoxLayout *layout = new QVBoxLayout(m_groupWindow);
+  QtTreePropertyBrowser *groupBrowser = new QtTreePropertyBrowser();
+  QtProperty *groupSettings = groupManager->addProperty("Group/Pair selection");
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    groupSettings->addSubProperty(m_groupBoxes.value(iter.key()));
+    m_boolManager->setValue(iter.value(), m_boolManager->value(iter.value()));
+  }
+  QtCheckBoxFactory *checkBoxFactory = new QtCheckBoxFactory(m_groupWindow);
+  groupBrowser->setFactoryForManager(m_boolManager, checkBoxFactory);
+  groupBrowser->addProperty(groupSettings);
+  layout->addWidget(groupBrowser);
+  m_groupWindow->setLayout(layout);
+  m_groupWindow->show();
+}
+/**
+* Sets checkboxes for periods
+* @param numPeriods :: [input] Number of periods
+*/
+void MuonFitPropertyBrowser::setNumPeriods(size_t numPeriods) {
+  m_periodsToFitOptions.clear();
+  // create more boxes
+  for (size_t i = 0; i != numPeriods; i++) {
+    QString name = QString::number(i + 1);
+    addPeriodCheckbox(name);
+  }
+  if (m_periodsToFitOptions.size() == 1) {
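+    // single-period data: hide the period controls and tick the only box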
+    m_generateBtn->setDisabled(true);
+    m_multiFitSettingsGroup->property()->removeSubProperty(m_periodsToFit);
+    m_multiFitSettingsGroup->property()->removeSubProperty(m_showPeriods);
+    m_enumManager->setValue(m_periodsToFit, 0);
+    clearChosenPeriods();
+    m_boolManager->setValue(m_periodBoxes.constBegin().value(), true);
+  } else {
+    // add custom back into list
+    m_multiFitSettingsGroup->property()->insertSubProperty(m_periodsToFit,
+                                                           m_showGroup);
+    m_multiFitSettingsGroup->property()->addSubProperty(m_showPeriods);
+    m_generateBtn->setDisabled(false);
+
+    m_periodsToFitOptions << "Custom";
+    m_enumManager->setEnumNames(m_periodsToFit, m_periodsToFitOptions);
+  }
+}
+/**
+* Sets period names and updates checkboxes on UI
+* By default sets all unchecked
+* @param periods :: [input] List of period names
+*/
+void MuonFitPropertyBrowser::setAvailablePeriods(const QStringList &periods) {
+  // If it's the same list, do nothing
+  if (periods.size() == m_periodBoxes.size()) {
+    auto existingGroups = m_periodBoxes.keys();
+    auto newGroups = periods;
+    qSort(existingGroups);
+    qSort(newGroups);
+    if (existingGroups == newGroups) {
+      return;
+    }
+  }
+
+  clearPeriodCheckboxes();
+
+  for (const auto &period : periods) {
+    addPeriodCheckbox(period);
+  }
+}
+/**
+* Clears all pair names and checkboxes
+* (ready to add new ones)
+*/
+void MuonFitPropertyBrowser::clearPeriodCheckboxes() {
+  if (m_periodBoxes.size() > 1) {
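+    // delete every period box except the first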
+    for (auto iter = m_periodBoxes.constBegin();
+         iter != m_periodBoxes.constEnd(); ++iter) {
+      if (iter != m_periodBoxes.constBegin()) {
+        delete iter.value();
+      }
+    }
+  }
+  m_periodsToFitOptions.clear();
+  m_periodsToFitOptions << "1";
+  m_enumManager->setEnumNames(m_periodsToFit, m_periodsToFitOptions);
+}
+/**
+* Clears the list of selected groups (unchecks boxes)
+*/
+void MuonFitPropertyBrowser::clearChosenPeriods() const {
+  for (auto iter = m_periodBoxes.constBegin(); iter != m_periodBoxes.constEnd();
+       ++iter) {
+    m_boolManager->setValue(iter.value(), false);
+  }
+}
+/**
+* Add a new checkbox to the list of periods with given name
+* The new checkbox is unchecked by default
+* @param name :: [input] Name of period to add
+*/
+void MuonFitPropertyBrowser::addPeriodCheckbox(const QString &name) {
+  m_periodBoxes.insert(name, m_boolManager->addProperty(name));
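+  // remember the current selection so it can be restored after rebuilding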
+  int j = m_enumManager->value(m_periodsToFit);
+
+  // add the new period to the list; it will go after the initial entries
+  m_periodsToFitOptions << name;
+  auto active = getChosenPeriods();
+  m_enumManager->setEnumNames(m_periodsToFit, m_periodsToFitOptions);
+  setChosenPeriods(active);
+  m_enumManager->setValue(m_periodsToFit, j);
+}
+/**
+* Returns a list of the selected periods (checked boxes)
+* @returns :: list of selected periods
+*/
+QStringList MuonFitPropertyBrowser::getChosenPeriods() const {
+  QStringList chosen;
+  // if single period
+  if (m_periodsToFitOptions.size() == 1) {
+    chosen << "";
+  } else {
+    for (auto iter = m_periodBoxes.constBegin();
+         iter != m_periodBoxes.constEnd(); ++iter) {
+      if (m_boolManager->value(iter.value()) == true) {
+        chosen.append(iter.key());
+      }
+    }
+  }
+  return chosen;
+}
+/**
+* Ticks the selected periods
+* @param chosenPeriods :: list of selected periods
+*/
+void MuonFitPropertyBrowser::setChosenPeriods(
+    const QStringList &chosenPeriods) {
+  clearChosenPeriods();
+  for (auto selected : chosenPeriods) {
+    for (auto iter = m_periodBoxes.constBegin();
+         iter != m_periodBoxes.constEnd(); ++iter) {
+      if (iter.key() == selected) {
+        m_boolManager->setValue(iter.value(), true);
+      }
+    }
+  }
+}
+/**
+* Ticks the selected period
+* @param period :: selected period
+*/
+void MuonFitPropertyBrowser::setChosenPeriods(const QString &period) {
+  clearChosenPeriods();
+  for (auto iter = m_periodBoxes.constBegin(); iter != m_periodBoxes.constEnd();
+       ++iter) {
+    if (iter.key() == period) {
+      m_boolManager->setValue(iter.value(), true);
+    }
+  }
+}
+/*
+* Create a pop up window to select a
+* custom set of periods
+*/
+void MuonFitPropertyBrowser::genPeriodWindow() {
+  QtGroupPropertyManager *groupManager =
+      new QtGroupPropertyManager(m_periodWindow);
+  QVBoxLayout *layout = new QVBoxLayout(m_periodWindow);
+  QtTreePropertyBrowser *groupBrowser = new QtTreePropertyBrowser();
+  QtProperty *groupSettings = groupManager->addProperty("Period selection");
+  for (auto iter = m_periodBoxes.constBegin(); iter != m_periodBoxes.constEnd();
+       ++iter) {
+    groupSettings->addSubProperty(m_periodBoxes.value(iter.key()));
+    m_boolManager->setValue(iter.value(), m_boolManager->value(iter.value()));
+  }
+  QtCheckBoxFactory *checkBoxFactory = new QtCheckBoxFactory(m_periodWindow);
+  groupBrowser->setFactoryForManager(m_boolManager, checkBoxFactory);
+  groupBrowser->addProperty(groupSettings);
+  layout->addWidget(groupBrowser);
+  m_periodWindow->setLayout(layout);
+  m_periodWindow->show();
+}
+/*
+* Create a pop up window to create
+* a combination of periods
+*/
+void MuonFitPropertyBrowser::genCombinePeriodWindow() {
+  QVBoxLayout *layout = new QVBoxLayout(m_comboWindow);
+  QFormLayout *formLayout = new QFormLayout;
+  m_positiveCombo = new QLineEdit();
+  m_negativeCombo = new QLineEdit();
+  formLayout->addRow(new QLabel(tr("Combine:")), m_positiveCombo);
+  formLayout->addRow(new QLabel(tr("   -    ")), m_negativeCombo);
+  layout->addLayout(formLayout);
+
+  QPushButton *applyBtn = new QPushButton("Apply");
+
+  connect(applyBtn, SIGNAL(released()), this, SLOT(combineBtnPressed()));
+
+  layout->addWidget(applyBtn);
+  m_comboWindow->setLayout(layout);
+  m_comboWindow->show();
+}
+/*
+* Get the positive and negative parts of the
+* combination of periods and produce a new
+* tick box. Unticked by default.
+*/
+void MuonFitPropertyBrowser::combineBtnPressed() {
+  QString value = m_positiveCombo->text();
+  if (value.isEmpty()) {
+    g_log.error("There are no positive periods (top box)");
+    return;
+  }
+  if (!m_negativeCombo->text().isEmpty()) {
+    value.append("-").append(m_negativeCombo->text());
+  }
+  m_positiveCombo->clear();
+  m_negativeCombo->clear();
+  addPeriodCheckbox(value);
+}
+/**
+* Sets the label for a single fit and
+* selects the relevant group/pair
+* @param name :: Name of the workspace
+*/
+void MuonFitPropertyBrowser::setSingleFitLabel(std::string name) {
+  clearChosenGroups();
+  clearChosenPeriods();
+  std::vector<std::string> splitName;
+  std::string tmpName = name;
+  boost::erase_all(tmpName, " ");
+  boost::split(splitName, tmpName, boost::is_any_of(";"));
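+  // the name is split on ';': element 2 is the group/pair and, when there
+  // are six parts (multi-period data), element 4 is the period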
+  // set single group/pair
+  QString group = QString::fromUtf8(splitName[2].c_str());
+  setChosenGroup(group);
+  // set period if available
+  if (splitName.size() == 6) {
+    QString period = QString::fromUtf8(splitName[4].c_str());
+    setChosenPeriods(period);
+  }
+  setOutputName(name);
+  // for single fit in multi fit mode
+  if (m_browser->isItemVisible(m_multiFitSettingsGroup)) {
+    updateGroupDisplay();
+    updatePeriodDisplay();
+  }
+}
 
 } // MantidQt
 } // API
diff --git a/MantidQt/MantidWidgets/src/WorkspacePresenter/QWorkspaceDockView.cpp b/MantidQt/MantidWidgets/src/WorkspacePresenter/QWorkspaceDockView.cpp
index 89e1e64c1443f700339bc391fade683379eff086..efc22faa61175110a8a9572ca66f5098aac73efc 100644
--- a/MantidQt/MantidWidgets/src/WorkspacePresenter/QWorkspaceDockView.cpp
+++ b/MantidQt/MantidWidgets/src/WorkspacePresenter/QWorkspaceDockView.cpp
@@ -23,7 +23,6 @@
 #include <Poco/Path.h>
 
 #include <QFileDialog>
-#include <QHash>
 #include <QKeyEvent>
 #include <QMainWindow>
 #include <QMenu>
@@ -40,25 +39,6 @@ namespace {
 Mantid::Kernel::Logger docklog("MantidDockWidget");
 
 WorkspaceIcons WORKSPACE_ICONS = WorkspaceIcons();
-
-bool isAllMatrixWorkspaces(const WorkspaceGroup_const_sptr &wsGroup) {
-  bool allMatrixWSes = false;
-
-  if (wsGroup) {
-    if (!wsGroup->isEmpty()) {
-      allMatrixWSes = true;
-      for (int index = 0; index < wsGroup->getNumberOfEntries(); index++) {
-        if (nullptr == boost::dynamic_pointer_cast<MatrixWorkspace>(
-                           wsGroup->getItem(index))) {
-          allMatrixWSes = false;
-          break;
-        }
-      }
-    }
-  }
-
-  return allMatrixWSes;
-}
 }
 
 namespace MantidQt {
@@ -210,16 +190,6 @@ WorkspacePresenterWN_wptr QWorkspaceDockView::getPresenterWeakPtr() {
   return boost::dynamic_pointer_cast<WorkspacePresenter>(m_presenter);
 }
 
-MantidSurfacePlotDialog::UserInputSurface
-QWorkspaceDockView::chooseContourPlotOptions(int nWorkspaces) const {
-  return m_tree->chooseContourPlotOptions(nWorkspaces);
-}
-
-MantidSurfacePlotDialog::UserInputSurface
-QWorkspaceDockView::chooseSurfacePlotOptions(int nWorkspaces) const {
-  return m_tree->chooseSurfacePlotOptions(nWorkspaces);
-}
-
 /** Returns the names of the selected workspaces
 *   in the dock.
 */
@@ -606,6 +576,10 @@ void QWorkspaceDockView::createWorkspaceMenuActions() {
   connect(m_plotSpecErr, SIGNAL(triggered()), this,
           SLOT(onClickPlotSpectraErr()));
 
+  m_plotAdvanced = new QAction(tr("Plot Advanced..."), this);
+  connect(m_plotAdvanced, SIGNAL(triggered()), this,
+          SLOT(onClickPlotAdvanced()));
+
   m_colorFill = new QAction(tr("Color Fill Plot"), this);
   connect(m_colorFill, SIGNAL(triggered()), this,
           SLOT(onClickDrawColorFillPlot()));
@@ -688,12 +662,6 @@ void QWorkspaceDockView::createWorkspaceMenuActions() {
 
   m_clearUB = new QAction(tr("Clear UB Matrix"), this);
   connect(m_clearUB, SIGNAL(triggered()), this, SLOT(onClickClearUB()));
-
-  m_plotSurface = new QAction(tr("Plot Surface from Group"), this);
-  connect(m_plotSurface, SIGNAL(triggered()), this, SLOT(onClickPlotSurface()));
-
-  m_plotContour = new QAction(tr("Plot Contour from Group"), this);
-  connect(m_plotContour, SIGNAL(triggered()), this, SLOT(onClickPlotContour()));
 }
 
 /**
@@ -944,10 +912,12 @@ void QWorkspaceDockView::addMatrixWorkspaceMenuItems(
   menu->addSeparator();
   menu->addAction(m_plotSpec);
   menu->addAction(m_plotSpecErr);
+  menu->addAction(m_plotAdvanced);
 
   // Don't plot a spectrum if only one X value
   m_plotSpec->setEnabled(matrixWS->blocksize() > 1);
   m_plotSpecErr->setEnabled(matrixWS->blocksize() > 1);
+  m_plotAdvanced->setEnabled(matrixWS->blocksize() > 1);
 
   menu->addAction(m_showSpectrumViewer); // The 2D spectrum viewer
 
@@ -1029,33 +999,17 @@ void QWorkspaceDockView::addPeaksWorkspaceMenuItems(
 /**
 * Add the actions that are appropriate for a WorkspaceGroup
 * @param menu :: The menu to store the items
-* @param groupWS :: [input] Workspace group related to the menu
 */
-void QWorkspaceDockView::addWorkspaceGroupMenuItems(
-    QMenu *menu, const WorkspaceGroup_const_sptr &groupWS) const {
+void QWorkspaceDockView::addWorkspaceGroupMenuItems(QMenu *menu) const {
   m_plotSpec->setEnabled(true);
   menu->addAction(m_plotSpec);
   m_plotSpecErr->setEnabled(true);
   menu->addAction(m_plotSpecErr);
+  m_plotAdvanced->setEnabled(true);
+  menu->addAction(m_plotAdvanced);
   menu->addAction(m_colorFill);
   m_colorFill->setEnabled(true);
 
-  // If appropriate, add "plot surface" and "plot contour" options
-  // Only add these if:
-  // - there are >2 workspaces in group
-  // - all are MatrixWorkspaces (otherwise they can't be plotted)
-  // - only one group is selected
-  if (m_tree->selectedItems().size() == 1) {
-    if (groupWS && groupWS->getNumberOfEntries() > 2) {
-      if (isAllMatrixWorkspaces(groupWS)) {
-        menu->addAction(m_plotSurface);
-        m_plotSurface->setEnabled(true);
-        menu->addAction(m_plotContour);
-        m_plotContour->setEnabled(true);
-      }
-    }
-  }
-
   menu->addSeparator();
   menu->addAction(m_saveNexus);
 }
@@ -1276,7 +1230,7 @@ void QWorkspaceDockView::popupContextMenu() {
       addPeaksWorkspaceMenuItems(menu, peaksWS);
     } else if (auto groupWS =
                    boost::dynamic_pointer_cast<const WorkspaceGroup>(ws)) {
-      addWorkspaceGroupMenuItems(menu, groupWS);
+      addWorkspaceGroupMenuItems(menu);
     } else if (boost::dynamic_pointer_cast<const Mantid::API::ITableWorkspace>(
                    ws)) {
       addTableWorkspaceMenuItems(menu);
@@ -1509,24 +1463,50 @@ void QWorkspaceDockView::onClickPlotSpectraErr() {
   m_presenter->notifyFromView(ViewNotifiable::Flag::PlotSpectrumWithErrors);
 }
 
+void QWorkspaceDockView::onClickPlotAdvanced() {
+  m_presenter->notifyFromView(ViewNotifiable::Flag::PlotSpectrumAdvanced);
+}
+
 /** Plots one or more spectra from each selected workspace
-* @param showErrors If true, show error bars. Otherwise no error bars are
-* displayed.
+* @param type "Simple", "Errors" show error bars, "Advanced" advanced plotting.
 */
-void QWorkspaceDockView::plotSpectrum(bool showErrors) {
-  const auto userInput = m_tree->chooseSpectrumFromSelected();
+void QWorkspaceDockView::plotSpectrum(std::string type) {
+  const bool isAdvanced = type == "Advanced";
+  const auto userInput =
+      m_tree->chooseSpectrumFromSelected(true, true, true, isAdvanced);
   // An empty map will be returned if the user clicks cancel in the spectrum
   // selection
   if (userInput.plots.empty()) {
     return;
   }
+  bool showErrorBars =
+      ((type == "Errors") || (type == "Advanced" && userInput.errors));
 
+  // mantidUI knows nothing about userInput, hence the long argument lists.
   if (userInput.tiled) {
     m_mantidUI->plotSubplots(userInput.plots, MantidQt::DistributionDefault,
-                             showErrors);
-  } else {
-    m_mantidUI->plot1D(userInput.plots, true, MantidQt::DistributionDefault,
-                       showErrors, nullptr, false, userInput.waterfall);
+                             showErrorBars);
+  } else if (userInput.simple || userInput.waterfall) {
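+    // advanced plots also pass a log name and custom log values to plot1D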
+    if (userInput.isAdvanced) {
+      m_mantidUI->plot1D(userInput.plots, true, MantidQt::DistributionDefault,
+                         showErrorBars, nullptr, false, userInput.waterfall,
+                         userInput.advanced.logName,
+                         userInput.advanced.customLogValues);
+    } else {
+      m_mantidUI->plot1D(userInput.plots, true, MantidQt::DistributionDefault,
+                         showErrorBars, nullptr, false, userInput.waterfall);
+    }
+
+  } else if (userInput.surface) {
+    m_mantidUI->plotSurface(
+        userInput.advanced.accepted, userInput.advanced.plotIndex,
+        userInput.advanced.axisName, userInput.advanced.logName,
+        userInput.advanced.customLogValues, userInput.advanced.workspaceNames);
+  } else if (userInput.contour) {
+    m_mantidUI->plotContour(
+        userInput.advanced.accepted, userInput.advanced.plotIndex,
+        userInput.advanced.axisName, userInput.advanced.logName,
+        userInput.advanced.customLogValues, userInput.advanced.workspaceNames);
   }
 }
 
@@ -1690,24 +1670,6 @@ void QWorkspaceDockView::onClickClearUB() {
   m_presenter->notifyFromView(ViewNotifiable::Flag::ClearUBMatrix);
 }
 
-void QWorkspaceDockView::onClickPlotSurface() {
-  m_presenter->notifyFromView(ViewNotifiable::Flag::ShowSurfacePlot);
-}
-
-/**
-* Create a 3D surface plot from the selected workspace group
-*/
-void QWorkspaceDockView::showSurfacePlot() { m_mantidUI->showSurfacePlot(); }
-
-void QWorkspaceDockView::onClickPlotContour() {
-  m_presenter->notifyFromView(ViewNotifiable::Flag::ShowContourPlot);
-}
-
-/**
-* Create a contour plot from the selected workspace group
-*/
-void QWorkspaceDockView::showContourPlot() { m_mantidUI->showContourPlot(); }
-
 /**
 * Allows asynchronous execution of algorithms. This method is made
 * available in the view for access by the presenter in order to
diff --git a/MantidQt/MantidWidgets/src/WorkspacePresenter/WorkspacePresenter.cpp b/MantidQt/MantidWidgets/src/WorkspacePresenter/WorkspacePresenter.cpp
index 512e7d4f1ca0623c3497b2b7641a35b70add388a..3cd00322bf4dab7e67e5b72d3a0823b6dee62931 100644
--- a/MantidQt/MantidWidgets/src/WorkspacePresenter/WorkspacePresenter.cpp
+++ b/MantidQt/MantidWidgets/src/WorkspacePresenter/WorkspacePresenter.cpp
@@ -101,6 +101,9 @@ void WorkspacePresenter::notifyFromView(ViewNotifiable::Flag flag) {
   case ViewNotifiable::Flag::PlotSpectrumWithErrors:
     plotSpectrumWithErrors();
     break;
+  case ViewNotifiable::Flag::PlotSpectrumAdvanced:
+    plotSpectrumAdvanced();
+    break;
   case ViewNotifiable::Flag::ShowColourFillPlot:
     showColourFillPlot();
     break;
@@ -146,12 +149,6 @@ void WorkspacePresenter::notifyFromView(ViewNotifiable::Flag flag) {
   case ViewNotifiable::Flag::ClearUBMatrix:
     clearUBMatrix();
     break;
-  case ViewNotifiable::Flag::ShowSurfacePlot:
-    showSurfacePlot();
-    break;
-  case ViewNotifiable::Flag::ShowContourPlot:
-    showContourPlot();
-    break;
   case ViewNotifiable::Flag::RefreshWorkspaces:
     refreshWorkspaces();
     break;
@@ -309,12 +306,17 @@ void WorkspacePresenter::saveToProgram() {
 
 void WorkspacePresenter::plotSpectrum() {
   auto view = lockView();
-  view->plotSpectrum(false);
+  view->plotSpectrum("Simple");
 }
 
 void WorkspacePresenter::plotSpectrumWithErrors() {
   auto view = lockView();
-  view->plotSpectrum(true);
+  view->plotSpectrum("Errors");
+}
+
+void WorkspacePresenter::plotSpectrumAdvanced() {
+  auto view = lockView();
+  view->plotSpectrum("Advanced");
 }
 
 void WorkspacePresenter::showColourFillPlot() {
@@ -404,16 +406,6 @@ void WorkspacePresenter::clearUBMatrix() {
   }
 }
 
-void WorkspacePresenter::showSurfacePlot() {
-  auto view = lockView();
-  view->showSurfacePlot();
-}
-
-void WorkspacePresenter::showContourPlot() {
-  auto view = lockView();
-  view->showContourPlot();
-}
-
 void WorkspacePresenter::refreshWorkspaces() { updateView(); }
 
 void WorkspacePresenter::workspaceLoaded() { updateView(); }
diff --git a/MantidQt/MantidWidgets/test/WorkspacePresenter/WorkspacePresenterTest.h b/MantidQt/MantidWidgets/test/WorkspacePresenter/WorkspacePresenterTest.h
index 3d0ebddeca4e12019690b0a6ac1515edfe5d09cf..ce6c681f750cb28837e7e670a7b89c895023bc91 100644
--- a/MantidQt/MantidWidgets/test/WorkspacePresenter/WorkspacePresenterTest.h
+++ b/MantidQt/MantidWidgets/test/WorkspacePresenter/WorkspacePresenterTest.h
@@ -531,17 +531,23 @@ public:
   }
 
   void testPlotSpectrum() {
-    EXPECT_CALL(*mockView.get(), plotSpectrum(false)).Times(Exactly(1));
+    EXPECT_CALL(*mockView.get(), plotSpectrum("Simple")).Times(Exactly(1));
     presenter->notifyFromView(ViewNotifiable::Flag::PlotSpectrum);
     TS_ASSERT(Mock::VerifyAndClearExpectations(&mockView));
   }
 
   void testPlotSpectrumWithErrors() {
-    EXPECT_CALL(*mockView.get(), plotSpectrum(true)).Times(Exactly(1));
+    EXPECT_CALL(*mockView.get(), plotSpectrum("Errors")).Times(Exactly(1));
     presenter->notifyFromView(ViewNotifiable::Flag::PlotSpectrumWithErrors);
     TS_ASSERT(Mock::VerifyAndClearExpectations(&mockView));
   }
 
+  void testPlotSpectrumAdvanced() {
+    EXPECT_CALL(*mockView.get(), plotSpectrum("Advanced")).Times(Exactly(1));
+    presenter->notifyFromView(ViewNotifiable::Flag::PlotSpectrumAdvanced);
+    TS_ASSERT(Mock::VerifyAndClearExpectations(&mockView));
+  }
+
   void testShowColourFillPlot() {
     EXPECT_CALL(*mockView.get(), showColourFillPlot()).Times(Exactly(1));
     presenter->notifyFromView(ViewNotifiable::Flag::ShowColourFillPlot);
@@ -648,18 +654,6 @@ public:
     AnalysisDataService::Instance().remove("ws1");
   }
 
-  void testShowSurfacePlot() {
-    EXPECT_CALL(*mockView.get(), showSurfacePlot()).Times(Exactly(1));
-    presenter->notifyFromView(ViewNotifiable::Flag::ShowSurfacePlot);
-    TS_ASSERT(Mock::VerifyAndClearExpectations(&mockView));
-  }
-
-  void testShowContourPlot() {
-    EXPECT_CALL(*mockView.get(), showContourPlot()).Times(Exactly(1));
-    presenter->notifyFromView(ViewNotifiable::Flag::ShowContourPlot);
-    TS_ASSERT(Mock::VerifyAndClearExpectations(&mockView));
-  }
-
 private:
   boost::shared_ptr<NiceMock<MockWorkspaceDockView>> mockView;
   WorkspacePresenterVN_sptr presenter;
diff --git a/MantidQt/SliceViewer/src/LineViewer.cpp b/MantidQt/SliceViewer/src/LineViewer.cpp
index 3b7d28000928484e72d73d69fffbbe82844d12f2..096773b0bc82799134d17fab47578233b0a8e04f 100644
--- a/MantidQt/SliceViewer/src/LineViewer.cpp
+++ b/MantidQt/SliceViewer/src/LineViewer.cpp
@@ -1027,7 +1027,7 @@ void LineViewer::setupScaleEngine(MantidQwtWorkspaceData &curveData) {
 
   if (m_lineOptions->isLogScaledY()) {
     engine = new QwtLog10ScaleEngine();
-    curveData.saveLowestPositiveValue(from);
+    curveData.setMinimumPositiveValue(from);
   } else {
     engine = new QwtLinearScaleEngine();
   }
diff --git a/MantidQt/SliceViewer/src/SliceViewer.cpp b/MantidQt/SliceViewer/src/SliceViewer.cpp
index 27c68cffc80119d4a12ee34f33082b6b324bb8aa..179aedc16025b4ad06c596aa15aba6d6d4ef4c3a 100644
--- a/MantidQt/SliceViewer/src/SliceViewer.cpp
+++ b/MantidQt/SliceViewer/src/SliceViewer.cpp
@@ -2996,6 +2996,7 @@ void SliceViewer::applyOrthogonalAxisScaleDraw() {
   auto *axis1 = new QwtScaleDraw();
   m_plot->setAxisScaleDraw(QwtPlot::xBottom, axis0);
   m_plot->setAxisScaleDraw(QwtPlot::yLeft, axis1);
+  this->updateDisplay();
 }
 
 } // namespace
diff --git a/MantidQt/SpectrumViewer/src/GraphDisplay.cpp b/MantidQt/SpectrumViewer/src/GraphDisplay.cpp
index a56e9cb5d0400114433d8af0efb3a75b050eca57..66ce72ba07f9214f0db8b3fa36b3c6bf9675657f 100644
--- a/MantidQt/SpectrumViewer/src/GraphDisplay.cpp
+++ b/MantidQt/SpectrumViewer/src/GraphDisplay.cpp
@@ -1,12 +1,14 @@
+#include "MantidQtSpectrumViewer/GraphDisplay.h"
+
+#include "MantidQtSpectrumViewer/QtUtils.h"
+#include "MantidQtSpectrumViewer/SVUtils.h"
+
+#include <boost/algorithm/clamp.hpp>
 #include <QtGui>
 #include <QVector>
 #include <QString>
 #include <qwt_scale_engine.h>
 
-#include "MantidQtSpectrumViewer/GraphDisplay.h"
-#include "MantidQtSpectrumViewer/QtUtils.h"
-#include "MantidQtSpectrumViewer/SVUtils.h"
-
 namespace MantidQt {
 namespace SpectrumView {
 
@@ -132,14 +134,32 @@ void GraphDisplay::clear() {
 void GraphDisplay::setRangeScale(double rangeScale) {
   m_rangeScale = rangeScale;
 
+  // A helper function to limit min and max to finite values.
+  auto clampRange = [](double &min, double &max) {
+    const double low = std::numeric_limits<double>::lowest();
+    const double high = std::numeric_limits<double>::max();
+    min = boost::algorithm::clamp(min, low, high, std::less_equal<double>());
+    max = boost::algorithm::clamp(max, low, high, std::less_equal<double>());
+  };
+
   if (m_isVertical) {
+    double axis_min = m_minX;
     double axis_max = m_rangeScale * (m_maxX - m_minX) + m_minX;
-    m_graphPlot->setAxisScale(QwtPlot::xBottom, m_minX, axis_max);
-    m_graphPlot->setAxisScale(QwtPlot::yLeft, m_minY, m_maxY);
+    clampRange(axis_min, axis_max);
+    m_graphPlot->setAxisScale(QwtPlot::xBottom, axis_min, axis_max);
+    axis_min = m_minY;
+    axis_max = m_maxY;
+    clampRange(axis_min, axis_max);
+    m_graphPlot->setAxisScale(QwtPlot::yLeft, axis_min, axis_max);
   } else {
+    double axis_min = m_minY;
     double axis_max = m_rangeScale * (m_maxY - m_minY) + m_minY;
-    m_graphPlot->setAxisScale(QwtPlot::yLeft, m_minY, axis_max);
-    m_graphPlot->setAxisScale(QwtPlot::xBottom, m_minX, m_maxX);
+    clampRange(axis_min, axis_max);
+    m_graphPlot->setAxisScale(QwtPlot::yLeft, axis_min, axis_max);
+    axis_min = m_minX;
+    axis_max = m_maxX;
+    clampRange(axis_min, axis_max);
+    m_graphPlot->setAxisScale(QwtPlot::xBottom, axis_min, axis_max);
   }
   m_graphPlot->replot();
 }
diff --git a/MantidQt/SpectrumViewer/src/MatrixWSDataSource.cpp b/MantidQt/SpectrumViewer/src/MatrixWSDataSource.cpp
index 95b0715007ebbb004df85c939aef291860585c82..8ccec01cea134afa0bb07c8b0233c33f608b610b 100644
--- a/MantidQt/SpectrumViewer/src/MatrixWSDataSource.cpp
+++ b/MantidQt/SpectrumViewer/src/MatrixWSDataSource.cpp
@@ -172,7 +172,7 @@ DataArray_const_sptr MatrixWSDataSource::getDataArray(double xMin, double xMax,
   MantidVec err;
   yVals.resize(numCols);
   err.resize(numCols);
-  size_t index = 0;
+  auto newDataIter = newData.begin();
   for (size_t i = 0; i < numRows; i++) {
     double midY = yMin + ((double)i + 0.5) * yStep;
     SVUtils::Interpolate(m_totalYMin, m_totalYMax, midY, 0.0,
@@ -184,10 +184,9 @@ DataArray_const_sptr MatrixWSDataSource::getDataArray(double xMin, double xMax,
     err.resize(numCols, 0);
 
     m_matWs->generateHistogram(sourceRow, xScale, yVals, err, true);
-    for (size_t col = 0; col < numCols; col++) {
-      newData[index] = (float)yVals[col];
-      index++;
-    }
+    newDataIter =
+        std::transform(yVals.cbegin(), yVals.cend(), newDataIter,
+                       [](const double y) { return static_cast<float>(y); });
   }
 
   // The calling code is responsible for deleting the DataArray when it is done
diff --git a/MantidQt/SpectrumViewer/src/SVUtils.cpp b/MantidQt/SpectrumViewer/src/SVUtils.cpp
index d2b5bd223beb65eb16c14aa27f545bd45dad5f39..0817ac0d3a8b7658ae23204253f11a279ea7d2b0 100644
--- a/MantidQt/SpectrumViewer/src/SVUtils.cpp
+++ b/MantidQt/SpectrumViewer/src/SVUtils.cpp
@@ -96,20 +96,17 @@ bool SVUtils::FindValidInterval(double &min, double &max) {
   if (max == min) // adjust values so they are not equal
   {
     valuesOK = false;
-    if (min == 0) {
-      min = -1, max = 1;
+    if (min == 0.0) {
+      min = -1.0;
+      max = 1.0;
     } else {
-      max = 1.1 * max;
-      min = 0.9 * min;
+      min *= 0.9;
+      max *= 1.1;
     }
-  }
-
-  if (min > max) // fix the order
+  } else if (min > max) // fix the order
   {
     valuesOK = false;
-    double temp = min;
-    min = max;
-    max = temp;
+    std::swap(min, max);
   }
 
   return valuesOK;
diff --git a/Testing/Data/SystemTest/35991-foc-h00.nxs.md5 b/Testing/Data/SystemTest/35991-foc-h00.nxs.md5
index f3deaffca81a1e7556dee7d38190d0b6bfebda84..36bbeba4eaa8c8e6cecb1b0afdeda5d0f4b71279 100644
--- a/Testing/Data/SystemTest/35991-foc-h00.nxs.md5
+++ b/Testing/Data/SystemTest/35991-foc-h00.nxs.md5
@@ -1 +1 @@
-265a7b2885316076a74893cfd685ed32
+871339d7b852e618f9943d79e0127e4e
diff --git a/Testing/Data/SystemTest/ILL/IN16B/140678.nxs.md5 b/Testing/Data/SystemTest/ILL/IN16B/140678.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..0cdc6e0f6cac2c4c054468f00f5bf09f741888a7
--- /dev/null
+++ b/Testing/Data/SystemTest/ILL/IN16B/140678.nxs.md5
@@ -0,0 +1 @@
+0eea7509924a5e4912e6685638af2467
diff --git a/Testing/Data/SystemTest/ILL/IN16B/140679.nxs.md5 b/Testing/Data/SystemTest/ILL/IN16B/140679.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..a56d272612db670ee083533ff812ead2da4ad6b6
--- /dev/null
+++ b/Testing/Data/SystemTest/ILL/IN16B/140679.nxs.md5
@@ -0,0 +1 @@
+141fbb9be2b046e6d5a7eb73f0c3517c
diff --git a/Testing/Data/SystemTest/ILL/IN16B/140680.nxs.md5 b/Testing/Data/SystemTest/ILL/IN16B/140680.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..d4ec7c941c4538e27e52fc943d568bc2bbed26dd
--- /dev/null
+++ b/Testing/Data/SystemTest/ILL/IN16B/140680.nxs.md5
@@ -0,0 +1 @@
+80d8baadfe75b444957c96671f526199
diff --git a/Testing/Data/SystemTest/ILL/IN16B/140681.nxs.md5 b/Testing/Data/SystemTest/ILL/IN16B/140681.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..435035e7b691f7025218b605797fd62543041857
--- /dev/null
+++ b/Testing/Data/SystemTest/ILL/IN16B/140681.nxs.md5
@@ -0,0 +1 @@
+f9e8990b52a9b342c4f259e2c3b0e816
diff --git a/Testing/Data/SystemTest/ILL/IN16B/140682.nxs.md5 b/Testing/Data/SystemTest/ILL/IN16B/140682.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..fa052096d2d1cbca40924997deb3874521b3059e
--- /dev/null
+++ b/Testing/Data/SystemTest/ILL/IN16B/140682.nxs.md5
@@ -0,0 +1 @@
+d3f95a48b4d1faaf51aa2f809fe87cd0
diff --git a/Testing/Data/SystemTest/SANS2D/DIRECT_SANS2D_REAR_34327_4m_8mm_16Feb16.txt.md5 b/Testing/Data/SystemTest/SANS2D/DIRECT_SANS2D_REAR_34327_4m_8mm_16Feb16.txt.md5
new file mode 100644
index 0000000000000000000000000000000000000000..5aa700540e452da26eb6b16e61287f25b2bba832
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/DIRECT_SANS2D_REAR_34327_4m_8mm_16Feb16.txt.md5
@@ -0,0 +1 @@
+d64495831325a63e1b961776a8544599
diff --git a/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_REAR_module2_tube12.xml.md5 b/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_REAR_module2_tube12.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..6f29bdcd62556866a4ed44fc62f529c15a604af6
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_REAR_module2_tube12.xml.md5
@@ -0,0 +1 @@
+d205f4893ef943234071195b6ed98bed
diff --git a/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_beam_stop_4m_x_100mm_2July2015_medium_beamstop.xml.md5 b/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_beam_stop_4m_x_100mm_2July2015_medium_beamstop.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..b053d46750d5e0c1fdcc6a70c780472d7e0f68f6
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_beam_stop_4m_x_100mm_2July2015_medium_beamstop.xml.md5
@@ -0,0 +1 @@
+e7a82ba82ddcf91cacbecd1b603fffbe
diff --git a/Testing/Data/SystemTest/SANS2D/SANS2D00034461.nxs.md5 b/Testing/Data/SystemTest/SANS2D/SANS2D00034461.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..724677275ff3a42eb1b5538e4f1f28c08abc553e
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/SANS2D00034461.nxs.md5
@@ -0,0 +1 @@
+02e693a2b832c1ea259c18239bd7cd47
diff --git a/Testing/Data/SystemTest/SANS2D/SANS2D00034481.nxs.md5 b/Testing/Data/SystemTest/SANS2D/SANS2D00034481.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..1f2c5cb910d7a9c6ef7255e28e1c8633d530ae23
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/SANS2D00034481.nxs.md5
@@ -0,0 +1 @@
+64c1cbe245360f5990909f2b7d4cc123
diff --git a/Testing/Data/SystemTest/SANS2D/SANS2D00034484.nxs.md5 b/Testing/Data/SystemTest/SANS2D/SANS2D00034484.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..62fd7030e419dfbe0c771eb2433364d8150dc2f2
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/SANS2D00034484.nxs.md5
@@ -0,0 +1 @@
+408bcafaaacb4af31970bdf84563cf87
diff --git a/Testing/Data/SystemTest/SANS2D/SANS2D00034502.nxs.md5 b/Testing/Data/SystemTest/SANS2D/SANS2D00034502.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..1c8423ce9f9ffd808f941f14c9c913b7d999aa03
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/SANS2D00034502.nxs.md5
@@ -0,0 +1 @@
+552dda99569562306ab421b1054f7171
diff --git a/Testing/Data/SystemTest/SANS2D/SANS2D00034505.nxs.md5 b/Testing/Data/SystemTest/SANS2D/SANS2D00034505.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..c2b0eebda89b2a53b059f595fb5a83bcb12bb76f
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/SANS2D00034505.nxs.md5
@@ -0,0 +1 @@
+6ac86823652c912cad4cbc1d6aed7d75
diff --git a/Testing/Data/SystemTest/SANS2D/TUBE_SANS2D_BOTH_31681_25Sept15.nxs.md5 b/Testing/Data/SystemTest/SANS2D/TUBE_SANS2D_BOTH_31681_25Sept15.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..4030886036723a942560d31ebc3a2b6388374afc
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/TUBE_SANS2D_BOTH_31681_25Sept15.nxs.md5
@@ -0,0 +1 @@
+e0b1f25d4a1746e9f3196e4214959d80
diff --git a/Testing/Data/SystemTest/SANS2D/USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt.md5 b/Testing/Data/SystemTest/SANS2D/USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt.md5
new file mode 100644
index 0000000000000000000000000000000000000000..9ee436a47608bbf59486dafe07bc9a25a4789ad4
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt.md5
@@ -0,0 +1 @@
+d4f1dee75274e1a3f36281e678f4d277
diff --git a/Testing/Data/SystemTest/WISH00038237.raw.md5 b/Testing/Data/SystemTest/WISH00038237.raw.md5
new file mode 100644
index 0000000000000000000000000000000000000000..dcdbf3169550522802e8b0d3e17adf6d8efe35e5
--- /dev/null
+++ b/Testing/Data/SystemTest/WISH00038237.raw.md5
@@ -0,0 +1 @@
+744951e81f99b22a8a7bc32f1e9dee64
diff --git a/Testing/Data/SystemTest/WISHPredictedSingleCrystalPeaks.nxs.md5 b/Testing/Data/SystemTest/WISHPredictedSingleCrystalPeaks.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..080087f4a8dac87020d7a23e2fe1e4d98100d669
--- /dev/null
+++ b/Testing/Data/SystemTest/WISHPredictedSingleCrystalPeaks.nxs.md5
@@ -0,0 +1 @@
+4900e3e61488bd1b025d445a7095f238
diff --git a/Testing/Data/SystemTest/WishAnalysis.nxs.md5 b/Testing/Data/SystemTest/WishAnalysis.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..cc062f02c3b7a22f53db64833a028aa5e62c29f8
--- /dev/null
+++ b/Testing/Data/SystemTest/WishAnalysis.nxs.md5
@@ -0,0 +1 @@
+a62c0a4ffa9e79da7916f5e1370f8919
diff --git a/Testing/Data/SystemTest/predict_peaks_test_random_ub.nxs.md5 b/Testing/Data/SystemTest/predict_peaks_test_random_ub.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..8200440b5f6a6b27d57ab32dd6b1065305bc35fd
--- /dev/null
+++ b/Testing/Data/SystemTest/predict_peaks_test_random_ub.nxs.md5
@@ -0,0 +1 @@
+2cc49505ae5bdd6dfb8c5bddfa2c694f
diff --git a/Testing/Data/UnitTest/MASK_SANS2D_BOTH_Extras_24Mar2015.xml.md5 b/Testing/Data/UnitTest/MASK_SANS2D_BOTH_Extras_24Mar2015.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..b664590b6b250a6c60752ab844a0b57875dda994
--- /dev/null
+++ b/Testing/Data/UnitTest/MASK_SANS2D_BOTH_Extras_24Mar2015.xml.md5
@@ -0,0 +1 @@
+d327787830f80fec05b8b7c3af7de726
diff --git a/Testing/Data/UnitTest/MASK_SANS2D_FRONT_Edges_16Mar2015.xml.md5 b/Testing/Data/UnitTest/MASK_SANS2D_FRONT_Edges_16Mar2015.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..fb962c799badc82d8ac1862cbbe65102de95751a
--- /dev/null
+++ b/Testing/Data/UnitTest/MASK_SANS2D_FRONT_Edges_16Mar2015.xml.md5
@@ -0,0 +1 @@
+cc6749dc7c34bb937e43753e89cd7e93
diff --git a/Testing/Data/UnitTest/MASK_SANS2D_REAR_Bottom_3_tubes_16May2014.xml.md5 b/Testing/Data/UnitTest/MASK_SANS2D_REAR_Bottom_3_tubes_16May2014.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..d751c75e1d7ebd4396150167fdce9d86b85fa3dd
--- /dev/null
+++ b/Testing/Data/UnitTest/MASK_SANS2D_REAR_Bottom_3_tubes_16May2014.xml.md5
@@ -0,0 +1 @@
+5b3196a5b6f7d8a361bc417e1505c81d
diff --git a/Testing/Data/UnitTest/MASK_SANS2D_REAR_Edges_16Mar2015.xml.md5 b/Testing/Data/UnitTest/MASK_SANS2D_REAR_Edges_16Mar2015.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..9091fa5db3db462985b95aee80c2d8f13fc38f91
--- /dev/null
+++ b/Testing/Data/UnitTest/MASK_SANS2D_REAR_Edges_16Mar2015.xml.md5
@@ -0,0 +1 @@
+f561508de44753d8a870bb6c133ed1ba
diff --git a/Testing/Data/UnitTest/POL00100.nxs.md5 b/Testing/Data/UnitTest/POL00100.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..bcfcae2898597f75a6da186c482470660e78dcb3
--- /dev/null
+++ b/Testing/Data/UnitTest/POL00100.nxs.md5
@@ -0,0 +1 @@
+1179865bb8336ab9a9362a7a4cd9670d
diff --git a/Testing/Data/UnitTest/POL00100.s1.md5 b/Testing/Data/UnitTest/POL00100.s1.md5
new file mode 100644
index 0000000000000000000000000000000000000000..bcfcae2898597f75a6da186c482470660e78dcb3
--- /dev/null
+++ b/Testing/Data/UnitTest/POL00100.s1.md5
@@ -0,0 +1 @@
+1179865bb8336ab9a9362a7a4cd9670d
diff --git a/Testing/Data/UnitTest/POL00100.s2.md5 b/Testing/Data/UnitTest/POL00100.s2.md5
new file mode 100644
index 0000000000000000000000000000000000000000..0c4cfb9e92f080f9824159604497965f7fa2ad8e
--- /dev/null
+++ b/Testing/Data/UnitTest/POL00100.s2.md5
@@ -0,0 +1 @@
+56174685a52192bdce71c8824238f098
diff --git a/Testing/Data/UnitTest/POL00101.nxs.md5 b/Testing/Data/UnitTest/POL00101.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..0c4cfb9e92f080f9824159604497965f7fa2ad8e
--- /dev/null
+++ b/Testing/Data/UnitTest/POL00101.nxs.md5
@@ -0,0 +1 @@
+56174685a52192bdce71c8824238f098
diff --git a/Testing/Data/UnitTest/POL95597.nxs.md5 b/Testing/Data/UnitTest/POL95597.nxs.md5
deleted file mode 100644
index 5ba7d1448c2952f4b7b1491c8b9e20e4aa158ad3..0000000000000000000000000000000000000000
--- a/Testing/Data/UnitTest/POL95597.nxs.md5
+++ /dev/null
@@ -1 +0,0 @@
-8dfe71a63ad15a9880fb6c7ab3165151
diff --git a/Testing/Data/UnitTest/POL95598.nxs.md5 b/Testing/Data/UnitTest/POL95598.nxs.md5
deleted file mode 100644
index 12401500e07ca77b14aa4969c3ab6798dac87939..0000000000000000000000000000000000000000
--- a/Testing/Data/UnitTest/POL95598.nxs.md5
+++ /dev/null
@@ -1 +0,0 @@
-a5c54ed2762cd13e6a75f50d09d6b5d7
diff --git a/Testing/Data/UnitTest/dnstof.d_dat.md5 b/Testing/Data/UnitTest/dnstof.d_dat.md5
new file mode 100644
index 0000000000000000000000000000000000000000..9d42867130c1dfa824b80f78cd7bd2f0534e4912
--- /dev/null
+++ b/Testing/Data/UnitTest/dnstof.d_dat.md5
@@ -0,0 +1 @@
+22cfa93c259ea5cc843dd01dec305cd6
diff --git a/Testing/Data/UnitTest/test_user_file_sans2d.txt.md5 b/Testing/Data/UnitTest/test_user_file_sans2d.txt.md5
new file mode 100644
index 0000000000000000000000000000000000000000..463f24157794a1865798311794b7534d492cbb57
--- /dev/null
+++ b/Testing/Data/UnitTest/test_user_file_sans2d.txt.md5
@@ -0,0 +1 @@
+28723764f0423ecf9090d57bae52fe23
diff --git a/Testing/SystemTests/tests/analysis/CountReflectionsTest.py b/Testing/SystemTests/tests/analysis/CountReflectionsTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e919ac4ee1999da35920b25ce2bd47238f4c0c5
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/CountReflectionsTest.py
@@ -0,0 +1,39 @@
+import stresstesting
+from mantid.simpleapi import *
+from SortHKLTest import HKLStatisticsTestMixin
+
+
+class CountReflectionsTest(HKLStatisticsTestMixin, stresstesting.MantidStressTest):
+    '''
+    This system test follows the same principle as the one for SortHKL. It loads data,
+    computes statistics and checks them against reference data obtained from another
+    software package (SORTAV, see SortHKLTest.py for details).
+    '''
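+    # A minimal sketch of what the algorithm under test returns (the PointGroup and
+    # LatticeCentering values here are illustrative assumptions; the real ones are
+    # derived from each space group below). The result is converted with _asdict()
+    # in runTest, giving keys such as 'UniqueReflections', 'Completeness' and
+    # 'Redundancy':
+    #
+    #   stats = CountReflections(InputWorkspace=peaks, PointGroup='m-3m',
+    #                            LatticeCentering='F',
+    #                            MinDSpacing=0.5, MaxDSpacing=10.0)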
+
+    def runTest(self):
+        self._init_test_data()
+        self.test_CountReflections()
+
+    def test_CountReflections(self):
+        for space_group in self._space_groups:
+            ub_parameters = self._load_ub_parameters(space_group)
+            reflections = self._load_reflections(space_group, ub_parameters)
+            reference_statistics = self._load_reference_statistics(space_group)
+
+            statistics = self._run_count_reflections(reflections, space_group)
+
+            self._compare_statistics(statistics._asdict(), reference_statistics)
+
+    def _run_count_reflections(self, reflections, space_group):
+        point_group = self._get_point_group(space_group).getHMSymbol()
+        centering = space_group[0]
+
+        return CountReflections(InputWorkspace=reflections, PointGroup=point_group,
+                                LatticeCentering=centering, MinDSpacing=0.5, MaxDSpacing=10.0)
+
+    def _compare_statistics(self, statistics, reference_statistics):
+        self.assertEquals(round(statistics['Redundancy'], 1), round(reference_statistics['<N>'], 1))
+        self.assertEquals(statistics['UniqueReflections'], int(reference_statistics['Nunique']))
+        self.assertDelta(round(statistics['Completeness'] * 100.0, 1),
+                         round(reference_statistics['Completeness'], 1),
+                         0.5)
diff --git a/Testing/SystemTests/tests/analysis/ILLIndirectReductionFWS.py b/Testing/SystemTests/tests/analysis/ILLIndirectReductionFWS.py
index 3663b9608ad0b04063952ce469e9a871977112be..9cf36c940831890e4ea53ab6e05389ad30e64e9a 100644
--- a/Testing/SystemTests/tests/analysis/ILLIndirectReductionFWS.py
+++ b/Testing/SystemTests/tests/analysis/ILLIndirectReductionFWS.py
@@ -1,6 +1,7 @@
 import stresstesting
-from mantid.simpleapi import *
-from mantid import config
+from mantid.simpleapi import CompareWorkspaces, LoadNexusProcessed, IndirectILLReductionFWS
+from mantid import config, mtd
+import numpy
 
 
 class ILLIndirectReductionFWSTest(stresstesting.MantidStressTest):
@@ -34,7 +35,8 @@ class ILLIndirectReductionFWSTest(stresstesting.MantidStressTest):
         return ["165944.nxs", "165945.nxs", "165946.nxs", "165947.nxs", "165948.nxs",
                 "165949.nxs", "165950.nxs", "165951.nxs", "165952.nxs", "165953.nxs",
                 "143720.nxs", "143721.nxs", "143722.nxs", "143723.nxs", "143724.nxs",
-                "143725.nxs", "143726.nxs", "143727.nxs", "143728.nxs", "143729.nxs"]
+                "143725.nxs", "143726.nxs", "143727.nxs", "143728.nxs", "143729.nxs",
+                "140678.nxs", "140679.nxs", "140680.nxs", "140681.nxs", "140682.nxs"]
 
     def runTest(self):
 
@@ -44,6 +46,8 @@ class ILLIndirectReductionFWSTest(stresstesting.MantidStressTest):
 
         self._run_sum_interpolate()
 
+        self._run_efws_mirror_sense()
+
         self.tearDown()
 
     def _run_ifws(self):
@@ -101,3 +105,12 @@ class ILLIndirectReductionFWSTest(stresstesting.MantidStressTest):
         else:
             self.assertTrue(result[0], "Sum/interpolate should be the same for one point: "
                             + result[1].row(0)['Message'])
+
+    def _run_efws_mirror_sense(self):
+        # This tests the EFWS in mirror mode: the data in 140680 is indeed split into two
+        # wings, while the other runs have an empty right wing (even though mirror sense is ON!)
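+        # If the two-wing and single-wing runs are reduced consistently, the resulting
+        # spectrum should be flat, so every point is checked against the average below.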
+        IndirectILLReductionFWS(Run="140678:140682", OutputWorkspace="efws_mirror")
+        yData = mtd["efws_mirror_red"].getItem(0).readY(17)
+        avg = numpy.average(yData)
+        for y in numpy.nditer(yData):
+            self.assertDelta(y, avg, 0.001)
diff --git a/Testing/SystemTests/tests/analysis/ILLIndirectReductionQENS.py b/Testing/SystemTests/tests/analysis/ILLIndirectReductionQENS.py
index 112f82b579b81064913efc5a6f1baea508ee0e09..38987bb05c127fbec485f9d782a9cf2dea4f59c6 100644
--- a/Testing/SystemTests/tests/analysis/ILLIndirectReductionQENS.py
+++ b/Testing/SystemTests/tests/analysis/ILLIndirectReductionQENS.py
@@ -1,6 +1,6 @@
 import stresstesting
-from mantid.simpleapi import *
-from mantid import config
+from mantid.simpleapi import IndirectILLReductionQENS, Plus, CompareWorkspaces, GroupWorkspaces, Scale
+from mantid import config, mtd
 
 
 class ILLIndirectReductionQENSTest(stresstesting.MantidStressTest):
diff --git a/Testing/SystemTests/tests/analysis/ISIS_WISHSingleCrystalReduction.py b/Testing/SystemTests/tests/analysis/ISIS_WISHSingleCrystalReduction.py
new file mode 100644
index 0000000000000000000000000000000000000000..a984f84b0b6f2c419c6bb89dd5f4c62d73f2ee15
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/ISIS_WISHSingleCrystalReduction.py
@@ -0,0 +1,53 @@
+from mantid.simpleapi import *
+import stresstesting
+import numpy as np
+
+
+class WISHSingleCrystalPeakPredictionTest(stresstesting.MantidStressTest):
+    """
+    At the time of writing, WISH users rely quite heavily on the PredictPeaks
+    algorithm. As WISH has tubes rather than rectangular detectors, peaks
+    sometimes fall in the gaps between the tubes.
+
+    Here we check that PredictPeaks works on a real WISH dataset & UB. This also
+    includes an example of a peak whose center is predicted to fall between two
+    tubes.
+    """
+
+    def requiredFiles(self):
+        return ["WISH00038237.raw", "WISHPredictedSingleCrystalPeaks.nxs"]
+
+    def requiredMemoryMB(self):
+        # Need lots of memory for full WISH dataset
+        return 16000
+
+    def cleanup(self):
+        pass
+
+    def runTest(self):
+        ws = LoadRaw(Filename='WISH00038237.raw', OutputWorkspace='38237')
+        ws = ConvertUnits(ws, 'dSpacing', OutputWorkspace='38237')
+        UB = np.array([[-0.00601763,  0.07397297,  0.05865706],
+                       [ 0.05373321,  0.050198,   -0.05651455],
+                       [-0.07822144,  0.0295911,  -0.04489172]])
+
+        SetUB(ws, UB=UB)
+
+        self._peaks = PredictPeaks(ws, WavelengthMin=0.1, WavelengthMax=100,
+                                   OutputWorkspace='peaks')
+        # We specifically want to check peak -5 -1 -7 exists, so filter for it
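+        # (the filter value follows from (-5)^2 + (-1)^2 + (-7)^2 = 25 + 1 + 49 = 75)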
+        self._filtered = FilterPeaks(self._peaks, "h^2+k^2+l^2", 75, '=',
+                                     OutputWorkspace='filtered')
+
+    def validate(self):
+        self.assertEqual(self._peaks.rowCount(), 510)
+        self.assertEqual(self._filtered.rowCount(), 6)
+        peak = self._filtered.row(2)
+
+        # This is an example of a peak that is known to fall in the gap between
+        # two WISH tubes. Specifically check that it is predicted to exist,
+        # because bugs have previously been found in the ray tracing.
+        peakMatches = peak['h'] == -5 and peak['k'] == -1 and peak['l'] == -7
+        self.assertTrue(peakMatches)
+
+        return self._peaks.name(), "WISHPredictedSingleCrystalPeaks.nxs"
diff --git a/Testing/SystemTests/tests/analysis/LoadLotsOfFiles.py b/Testing/SystemTests/tests/analysis/LoadLotsOfFiles.py
index 28bca44cd1d6cc7df197b198676f82d970163ab9..414feb772f52b5745cfcbc683ae9b04feb051f55 100644
--- a/Testing/SystemTests/tests/analysis/LoadLotsOfFiles.py
+++ b/Testing/SystemTests/tests/analysis/LoadLotsOfFiles.py
@@ -45,6 +45,8 @@ BANNED_FILES = ['80_tubes_Top_and_Bottom_April_2015.xml',
                 'MASK_SANS2D_FRONT_Edges_16Mar2015.xml',
                 'MASK_SANS2D_REAR_Bottom_3_tubes_16May2014.xml',
                 'MASK_SANS2D_REAR_Edges_16Mar2015.xml',
+                'MASK_SANS2D_REAR_module2_tube12.xml',
+                'MASK_SANS2D_beam_stop_4m_x_100mm_2July2015_medium_beamstop.xml',
                 'MASK_SANS2D_BOTH_Extras_24Mar2015.xml',
                 'MASK_Tube6.xml',
                 'MASK_squareBeamstop_6x8Beam_11-October-2016.xml',
@@ -91,6 +93,7 @@ BANNED_FILES = ['80_tubes_Top_and_Bottom_April_2015.xml',
                 'poldi2015n000977.hdf',
                 'USER_SANS2D_143ZC_2p4_4m_M4_Knowles_12mm.txt',
                 'USER_LARMOR_151B_LarmorTeam_80tubes_BenchRot1p4_M4_r3699.txt',
+                'USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt',
                 'USER_Larmor_163F_HePATest_r13038.txt',
                 'Vesuvio_IP_file_test.par',
                 'IP0004_10.par',
diff --git a/Testing/SystemTests/tests/analysis/PredictPeaksTest.py b/Testing/SystemTests/tests/analysis/PredictPeaksTest.py
index 1e7f9aba5a5878045ebd37a8d2c63f85ca54394e..bec31c000614811b010cbdc2936577c823c28e62 100644
--- a/Testing/SystemTests/tests/analysis/PredictPeaksTest.py
+++ b/Testing/SystemTests/tests/analysis/PredictPeaksTest.py
@@ -72,6 +72,8 @@ class PredictPeaksTestTOPAZ(stresstesting.MantidStressTest):
 
 
 class PredictPeaksCalculateStructureFactorsTest(stresstesting.MantidStressTest):
+    expected_num_peaks = 546
+
     def runTest(self):
         simulationWorkspace = CreateSimulationWorkspace(Instrument='WISH',
                                                         BinParams='0,1,2',
@@ -89,9 +91,9 @@ class PredictPeaksCalculateStructureFactorsTest(stresstesting.MantidStressTest):
                              MinDSpacing=0.5, MaxDSpacing=10,
                              CalculateStructureFactors=True)
 
-        self.assertEquals(peaks.getNumberPeaks(), 540)
+        self.assertEquals(peaks.getNumberPeaks(), self.expected_num_peaks)
 
-        for i in range(540):
+        for i in range(self.expected_num_peaks):
             peak = peaks.getPeak(i)
             self.assertLessThan(0.0, peak.getIntensity())
 
@@ -100,6 +102,6 @@ class PredictPeaksCalculateStructureFactorsTest(stresstesting.MantidStressTest):
                                    MinDSpacing=0.5, MaxDSpacing=10,
                                    CalculateStructureFactors=False)
 
-        for i in range(540):
+        for i in range(self.expected_num_peaks):
             peak = peaks_no_sf.getPeak(i)
             self.assertEquals(0.0, peak.getIntensity())
diff --git a/Testing/SystemTests/tests/analysis/SANSBatchReductionTest.py b/Testing/SystemTests/tests/analysis/SANSBatchReductionTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2071b4a22e6b848d7324518ecb709c9440e8f11
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/SANSBatchReductionTest.py
@@ -0,0 +1,161 @@
+# pylint: disable=too-many-public-methods, invalid-name, too-many-arguments
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import stresstesting
+from mantid.api import AnalysisDataService
+
+from sans.sans_batch import SANSBatchReduction
+from sans.user_file.user_file_state_director import UserFileStateDirectorISIS
+from sans.state.data import get_data_builder
+from sans.common.enums import (SANSFacility, ISISReductionMode, OutputMode)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_unmanaged_algorithm
+
+
+# -----------------------------------------------
+# Tests for the SANSBatchReduction algorithm
+# -----------------------------------------------
+class SANSBatchReductionTest(unittest.TestCase):
+
+    def _run_batch_reduction(self, states, use_optimizations=False):
+        batch_reduction_alg = SANSBatchReduction()
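+        # Any exception raised by the reduction should fail the test, hence the
+        # blanket except below (marked noqa to silence the bare-except warning).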
+        try:
+            batch_reduction_alg(states, use_optimizations, OutputMode.PublishToADS)
+            did_raise = False
+        except:  # noqa
+            did_raise = True
+        self.assertFalse(did_raise)
+
+    def _compare_workspace(self, workspace, reference_file_name):
+        # Load the reference file
+        load_name = "LoadNexusProcessed"
+        load_options = {"Filename": reference_file_name,
+                        "OutputWorkspace": EMPTY_NAME}
+        load_alg = create_unmanaged_algorithm(load_name, **load_options)
+        load_alg.execute()
+        reference_workspace = load_alg.getProperty("OutputWorkspace").value
+
+        # Compare the reference file with the output workspace.
+        # We need to disable the instrument comparison because it takes far too long.
+        # We also need to disable the sample comparison -- it is not yet clear why, but
+        # the number of entries in the sample logs differs between the two workspaces.
+        compare_name = "CompareWorkspaces"
+        compare_options = {"Workspace1": workspace,
+                           "Workspace2": reference_workspace,
+                           "Tolerance": 1e-6,
+                           "CheckInstrument": False,
+                           "CheckSample": False,
+                           "ToleranceRelErr": True,
+                           "CheckAllData": True,
+                           "CheckMasking": True,
+                           "CheckType": True,
+                           "CheckAxes": True,
+                           "CheckSpectraMap": True}
+        compare_alg = create_unmanaged_algorithm(compare_name, **compare_options)
+        compare_alg.setChild(False)
+        compare_alg.execute()
+        result = compare_alg.getProperty("Result").value
+        self.assertTrue(result)
+
+    def test_that_batch_reduction_evaluates_LAB(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_can_scatter("SANS2D00034481")
+        data_builder.set_can_transmission("SANS2D00034502")
+        data_builder.set_can_direct("SANS2D00034461")
+
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+        # Set the reduction mode to LAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.LAB)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event-based data but want to compare it with histogram
+        # data from the old reduction system, we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        state = user_file_director.construct()
+
+        # Act
+        states = [state]
+        self._run_batch_reduction(states, use_optimizations=False)
+        workspace_name = "34484rear_1D_1.75_16.5"
+        output_workspace = AnalysisDataService.retrieve(workspace_name)
+
+        # Evaluate it up to a defined point
+        reference_file_name = "SANS2D_ws_D20_reference_LAB_1D.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+        if AnalysisDataService.doesExist(workspace_name):
+            AnalysisDataService.remove(workspace_name)
+
+    def test_batch_reduction_on_multiperiod_file(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D0005512")
+
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("MASKSANS2Doptions.091A")
+        # Set the reduction mode to LAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.LAB)
+        state = user_file_director.construct()
+
+        # Act
+        states = [state]
+        self._run_batch_reduction(states, use_optimizations=False)
+
+        # Assert
+        # We only assert that the expected workspaces exist on the ADS
+        expected_workspaces = ["5512p1rear_1D_2.0_14.0Phi-45.0_45.0", "5512p2rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p3rear_1D_2.0_14.0Phi-45.0_45.0", "5512p4rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p5rear_1D_2.0_14.0Phi-45.0_45.0", "5512p6rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p7rear_1D_2.0_14.0Phi-45.0_45.0", "5512p8rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p9rear_1D_2.0_14.0Phi-45.0_45.0", "5512p10rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p11rear_1D_2.0_14.0Phi-45.0_45.0", "5512p12rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p13rear_1D_2.0_14.0Phi-45.0_45.0"]
+        for element in expected_workspaces:
+            self.assertTrue(AnalysisDataService.doesExist(element))
+
+        # Clean up
+        for element in expected_workspaces:
+            AnalysisDataService.remove(element)
+
+
+class SANSBatchReductionRunnerTest(stresstesting.MantidStressTest):
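+    # Wraps the unittest suite above so that the system test framework can run it;
+    # validate() simply reports whether all of the unit tests passed.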
+    def __init__(self):
+        stresstesting.MantidStressTest.__init__(self)
+        self._success = False
+
+    def runTest(self):
+        suite = unittest.TestSuite()
+        suite.addTest(unittest.makeSuite(SANSBatchReductionTest, 'test'))
+        runner = unittest.TextTestRunner()
+        res = runner.run(suite)
+        if res.wasSuccessful():
+            self._success = True
+
+    def requiredMemoryMB(self):
+        return 2000
+
+    def validate(self):
+        return self._success
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Testing/SystemTests/tests/analysis/SANSReductionCoreTest.py b/Testing/SystemTests/tests/analysis/SANSReductionCoreTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..166ecf5130b36bed7ec3a8137befcc577468053d
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/SANSReductionCoreTest.py
@@ -0,0 +1,192 @@
+# pylint: disable=too-many-public-methods, invalid-name, too-many-arguments
+
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import os
+import stresstesting
+
+import mantid
+from mantid.api import AlgorithmManager
+
+from sans.state.data import get_data_builder
+from sans.common.enums import (DetectorType, DataType, SANSFacility)
+from sans.user_file.user_file_state_director import UserFileStateDirectorISIS
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_unmanaged_algorithm
+
+
+# -----------------------------------------------
+# Tests for the SANSReductionCore algorithm
+# -----------------------------------------------
+class SANSReductionCoreTest(unittest.TestCase):
+    def _load_workspace(self, state):
+        load_alg = AlgorithmManager.createUnmanaged("SANSLoad")
+        load_alg.setChild(True)
+        load_alg.initialize()
+
+        state_dict = state.property_manager
+        load_alg.setProperty("SANSState", state_dict)
+        load_alg.setProperty("PublishToCache", False)
+        load_alg.setProperty("UseCached", False)
+        load_alg.setProperty("MoveWorkspace", False)
+        load_alg.setProperty("SampleScatterWorkspace", EMPTY_NAME)
+        load_alg.setProperty("SampleScatterMonitorWorkspace", EMPTY_NAME)
+        if state.data.sample_transmission:
+            load_alg.setProperty("SampleTransmissionWorkspace", EMPTY_NAME)
+        if state.data.sample_direct:
+            load_alg.setProperty("SampleDirectWorkspace", EMPTY_NAME)
+
+        # Act
+        load_alg.execute()
+        self.assertTrue(load_alg.isExecuted())
+        sample_scatter = load_alg.getProperty("SampleScatterWorkspace").value
+        sample_scatter_monitor_workspace = load_alg.getProperty("SampleScatterMonitorWorkspace").value
+        if state.data.sample_transmission:
+            transmission_workspace = load_alg.getProperty("SampleTransmissionWorkspace").value
+        else:
+            transmission_workspace = None
+        if state.data.sample_direct:
+            direct_workspace = load_alg.getProperty("SampleDirectWorkspace").value
+        else:
+            direct_workspace = None
+        return sample_scatter, sample_scatter_monitor_workspace, transmission_workspace, direct_workspace
+
+    def _run_reduction_core(self, state, workspace, monitor, transmission=None, direct=None,
+                            detector_type=DetectorType.LAB, component=DataType.Sample):
+        reduction_core_alg = AlgorithmManager.createUnmanaged("SANSReductionCore")
+        reduction_core_alg.setChild(True)
+        reduction_core_alg.initialize()
+
+        state_dict = state.property_manager
+        reduction_core_alg.setProperty("SANSState", state_dict)
+        reduction_core_alg.setProperty("ScatterWorkspace", workspace)
+        reduction_core_alg.setProperty("ScatterMonitorWorkspace", monitor)
+
+        if transmission:
+            reduction_core_alg.setProperty("TransmissionWorkspace", transmission)
+
+        if direct:
+            reduction_core_alg.setProperty("DirectWorkspace", direct)
+
+        reduction_core_alg.setProperty("Component", DetectorType.to_string(detector_type))
+        reduction_core_alg.setProperty("DataType", DataType.to_string(component))
+
+        reduction_core_alg.setProperty("OutputWorkspace", EMPTY_NAME)
+
+        # Act
+        reduction_core_alg.execute()
+        self.assertTrue(reduction_core_alg.isExecuted())
+        return reduction_core_alg
+
+    def _compare_workspace(self, workspace, reference_file_name):
+        # Load the reference file
+        load_name = "LoadNexusProcessed"
+        load_options = {"Filename": reference_file_name,
+                        "OutputWorkspace": EMPTY_NAME}
+        load_alg = create_unmanaged_algorithm(load_name, **load_options)
+        load_alg.execute()
+        reference_workspace = load_alg.getProperty("OutputWorkspace").value
+
+        # Save the workspace out and reload it again. This puts it on an equal footing with the reference workspace
+        f_name = os.path.join(mantid.config.getString('defaultsave.directory'),
+                              'SANS_temp_single_core_reduction_testout.nxs')
+
+        save_name = "SaveNexus"
+        save_options = {"Filename": f_name,
+                        "InputWorkspace": workspace}
+        save_alg = create_unmanaged_algorithm(save_name, **save_options)
+        save_alg.execute()
+        load_alg.setProperty("Filename", f_name)
+        load_alg.setProperty("OutputWorkspace", EMPTY_NAME)
+        load_alg.execute()
+
+        ws = load_alg.getProperty("OutputWorkspace").value
+
+        # Compare the reference file with the output workspace.
+        # We need to disable the instrument comparison because it takes far too long.
+        # We also need to disable the sample comparison -- the sample has been modified
+        # (more entries are written to the sample logs), so the logs no longer match.
+        compare_name = "CompareWorkspaces"
+        compare_options = {"Workspace1": ws,
+                           "Workspace2": reference_workspace,
+                           "Tolerance": 1e-6,
+                           "CheckInstrument": False,
+                           "CheckSample": False,
+                           "ToleranceRelErr": True,
+                           "CheckAllData": True,
+                           "CheckMasking": True,
+                           "CheckType": True,
+                           "CheckAxes": True,
+                           "CheckSpectraMap": True}
+        compare_alg = create_unmanaged_algorithm(compare_name, **compare_options)
+        compare_alg.setChild(False)
+        compare_alg.execute()
+        result = compare_alg.getProperty("Result").value
+        self.assertTrue(result)
+
+        # Remove file
+        if os.path.exists(f_name):
+            os.remove(f_name)
+
+    def test_that_reduction_core_evaluates_LAB(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+        data_state = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_state)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event-based data but want to compare it with histogram
+        # data from the old reduction system, we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+        # Construct the final state
+        state = user_file_director.construct()
+
+        # Load the sample workspaces
+        workspace, workspace_monitor, transmission_workspace, direct_workspace = self._load_workspace(state)
+
+        # Act
+        reduction_core_alg = self._run_reduction_core(state, workspace, workspace_monitor,
+                                                      transmission_workspace, direct_workspace)
+        output_workspace = reduction_core_alg.getProperty("OutputWorkspace").value
+
+        # Evaluate it up to a defined point
+        reference_file_name = "SANS2D_ws_D20_reference.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+
+class SANSReductionCoreRunnerTest(stresstesting.MantidStressTest):
+    def __init__(self):
+        stresstesting.MantidStressTest.__init__(self)
+        self._success = False
+
+    def runTest(self):
+        suite = unittest.TestSuite()
+        suite.addTest(unittest.makeSuite(SANSReductionCoreTest, 'test'))
+        runner = unittest.TextTestRunner()
+        res = runner.run(suite)
+        if res.wasSuccessful():
+            self._success = True
+
+    def requiredMemoryMB(self):
+        return 2000
+
+    def validate(self):
+        return self._success
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Testing/SystemTests/tests/analysis/SANSSingleReductionTest.py b/Testing/SystemTests/tests/analysis/SANSSingleReductionTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f504cfe858a3e595c0019db91d41d2d0f52b93b
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/SANSSingleReductionTest.py
@@ -0,0 +1,368 @@
+# pylint: disable=too-many-public-methods, invalid-name, too-many-arguments
+
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import stresstesting
+
+import mantid  # noqa
+from mantid.api import AlgorithmManager
+from sans.user_file.user_file_state_director import UserFileStateDirectorISIS
+from sans.state.data import get_data_builder
+from sans.common.enums import (SANSFacility, ISISReductionMode, ReductionDimensionality, FitModeForMerge)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_unmanaged_algorithm
+
+
+# -----------------------------------------------
+# Tests for the SANSSingleReduction algorithm
+# -----------------------------------------------
+class SANSSingleReductionTest(unittest.TestCase):
+    def _load_workspace(self, state):
+        load_alg = AlgorithmManager.createUnmanaged("SANSLoad")
+        load_alg.setChild(True)
+        load_alg.initialize()
+
+        state_dict = state.property_manager
+        load_alg.setProperty("SANSState", state_dict)
+        load_alg.setProperty("PublishToCache", False)
+        load_alg.setProperty("UseCached", False)
+        load_alg.setProperty("MoveWorkspace", False)
+
+        load_alg.setProperty("SampleScatterWorkspace", EMPTY_NAME)
+        load_alg.setProperty("SampleScatterMonitorWorkspace", EMPTY_NAME)
+        load_alg.setProperty("SampleTransmissionWorkspace", EMPTY_NAME)
+        load_alg.setProperty("SampleDirectWorkspace", EMPTY_NAME)
+
+        load_alg.setProperty("CanScatterWorkspace", EMPTY_NAME)
+        load_alg.setProperty("CanScatterMonitorWorkspace", EMPTY_NAME)
+        load_alg.setProperty("CanTransmissionWorkspace", EMPTY_NAME)
+        load_alg.setProperty("CanDirectWorkspace", EMPTY_NAME)
+
+        # Act
+        load_alg.execute()
+        self.assertTrue(load_alg.isExecuted())
+        sample_scatter = load_alg.getProperty("SampleScatterWorkspace").value
+        sample_scatter_monitor_workspace = load_alg.getProperty("SampleScatterMonitorWorkspace").value
+        transmission_workspace = load_alg.getProperty("SampleTransmissionWorkspace").value
+        direct_workspace = load_alg.getProperty("SampleDirectWorkspace").value
+
+        can_scatter_workspace = load_alg.getProperty("CanScatterWorkspace").value
+        can_scatter_monitor_workspace = load_alg.getProperty("CanScatterMonitorWorkspace").value
+        can_transmission_workspace = load_alg.getProperty("CanTransmissionWorkspace").value
+        can_direct_workspace = load_alg.getProperty("CanDirectWorkspace").value
+
+        return sample_scatter, sample_scatter_monitor_workspace, transmission_workspace, direct_workspace, \
+               can_scatter_workspace, can_scatter_monitor_workspace, can_transmission_workspace, can_direct_workspace  # noqa
+
+    def _run_single_reduction(self, state, sample_scatter, sample_monitor, sample_transmission=None, sample_direct=None,
+                              can_scatter=None, can_monitor=None, can_transmission=None, can_direct=None,
+                              output_settings=None):
+        single_reduction_name = "SANSSingleReduction"
+        state_dict = state.property_manager
+
+        single_reduction_options = {"SANSState": state_dict,
+                                    "SampleScatterWorkspace": sample_scatter,
+                                    "SampleScatterMonitorWorkspace": sample_monitor,
+                                    "UseOptimizations": False}
+        if sample_transmission:
+            single_reduction_options.update({"SampleTransmissionWorkspace": sample_transmission})
+
+        if sample_direct:
+            single_reduction_options.update({"SampleDirectWorkspace": sample_direct})
+
+        if can_scatter:
+            single_reduction_options.update({"CanScatterWorkspace": can_scatter})
+
+        if can_monitor:
+            single_reduction_options.update({"CanScatterMonitorWorkspace": can_monitor})
+
+        if can_transmission:
+            single_reduction_options.update({"CanTransmissionWorkspace": can_transmission})
+
+        if can_direct:
+            single_reduction_options.update({"CanDirectWorkspace": can_direct})
+
+        if output_settings:
+            single_reduction_options.update(output_settings)
+
+        single_reduction_alg = create_unmanaged_algorithm(single_reduction_name, **single_reduction_options)
+
+        # Act
+        single_reduction_alg.execute()
+        self.assertTrue(single_reduction_alg.isExecuted())
+        return single_reduction_alg
+
+    def _compare_workspace(self, workspace, reference_file_name):
+        # Load the reference file
+        load_name = "LoadNexusProcessed"
+        load_options = {"Filename": reference_file_name,
+                        "OutputWorkspace": EMPTY_NAME}
+        load_alg = create_unmanaged_algorithm(load_name, **load_options)
+        load_alg.execute()
+        reference_workspace = load_alg.getProperty("OutputWorkspace").value
+
+        # Compare the reference file with the output workspace.
+        # We need to disable the instrument comparison because it takes far too long.
+        # We also need to disable the sample comparison -- it is not yet clear why, but
+        # the number of entries in the sample logs differs between the two workspaces.
+        compare_name = "CompareWorkspaces"
+        compare_options = {"Workspace1": workspace,
+                           "Workspace2": reference_workspace,
+                           "Tolerance": 1e-6,
+                           "CheckInstrument": False,
+                           "CheckSample": False,
+                           "ToleranceRelErr": True,
+                           "CheckAllData": True,
+                           "CheckMasking": True,
+                           "CheckType": True,
+                           "CheckAxes": True,
+                           "CheckSpectraMap": True}
+        compare_alg = create_unmanaged_algorithm(compare_name, **compare_options)
+        compare_alg.setChild(False)
+        compare_alg.execute()
+        result = compare_alg.getProperty("Result").value
+        self.assertTrue(result)
+
+    def test_that_single_reduction_evaluates_LAB(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_can_scatter("SANS2D00034481")
+        data_builder.set_can_transmission("SANS2D00034502")
+        data_builder.set_can_direct("SANS2D00034461")
+
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+        # Set the reduction mode to LAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.LAB)
+
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event-based data but want to compare it with histogram
+        # data from the old reduction system, we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        state = user_file_director.construct()
+
+        # Load the sample workspaces
+        sample, sample_monitor, transmission_workspace, direct_workspace, can, can_monitor, \
+        can_transmission, can_direct = self._load_workspace(state)  # noqa
+
+        # Act
+        output_settings = {"OutputWorkspaceLAB": EMPTY_NAME}
+        single_reduction_alg = self._run_single_reduction(state, sample_scatter=sample,
+                                                          sample_transmission=transmission_workspace,
+                                                          sample_direct=direct_workspace,
+                                                          sample_monitor=sample_monitor,
+                                                          can_scatter=can,
+                                                          can_monitor=can_monitor,
+                                                          can_transmission=can_transmission,
+                                                          can_direct=can_direct,
+                                                          output_settings=output_settings)
+        output_workspace = single_reduction_alg.getProperty("OutputWorkspaceLAB").value
+
+        # Compare the output of the reduction with the reference
+        reference_file_name = "SANS2D_ws_D20_reference_LAB_1D.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+    def test_that_single_reduction_evaluates_HAB(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_can_scatter("SANS2D00034481")
+        data_builder.set_can_transmission("SANS2D00034502")
+        data_builder.set_can_direct("SANS2D00034461")
+
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+        # Set the reduction mode to HAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.HAB)
+
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event-based data but want to compare it with histogram
+        # data from the old reduction system, we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        state = user_file_director.construct()
+
+        # Load the sample workspaces
+        sample, sample_monitor, transmission_workspace, direct_workspace, can, can_monitor,\
+        can_transmission, can_direct = self._load_workspace(state)  # noqa
+
+        # Act
+        output_settings = {"OutputWorkspaceHAB": EMPTY_NAME}
+        single_reduction_alg = self._run_single_reduction(state, sample_scatter=sample,
+                                                          sample_transmission=transmission_workspace,
+                                                          sample_direct=direct_workspace,
+                                                          sample_monitor=sample_monitor,
+                                                          can_scatter=can,
+                                                          can_monitor=can_monitor,
+                                                          can_transmission=can_transmission,
+                                                          can_direct=can_direct,
+                                                          output_settings=output_settings)
+        output_workspace = single_reduction_alg.getProperty("OutputWorkspaceHAB").value
+
+        # Compare the output of the reduction with the reference
+        reference_file_name = "SANS2D_ws_D20_reference_HAB_1D.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+    def test_that_single_reduction_evaluates_merged(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_can_scatter("SANS2D00034481")
+        data_builder.set_can_transmission("SANS2D00034502")
+        data_builder.set_can_direct("SANS2D00034461")
+
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+        # Set the reduction mode to Merged
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.Merged)
+        user_file_director.set_reduction_builder_merge_fit_mode(FitModeForMerge.Both)
+        user_file_director.set_reduction_builder_merge_scale(1.0)
+        user_file_director.set_reduction_builder_merge_shift(0.0)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event-based data but want to compare it with histogram
+        # data from the old reduction system, we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        state = user_file_director.construct()
+
+        # Load the sample workspaces
+        sample, sample_monitor, transmission_workspace, direct_workspace, \
+        can, can_monitor, can_transmission, can_direct = self._load_workspace(state)  # noqa
+
+        # Act
+        output_settings = {"OutputWorkspaceMerged": EMPTY_NAME}
+        single_reduction_alg = self._run_single_reduction(state, sample_scatter=sample,
+                                                          sample_transmission=transmission_workspace,
+                                                          sample_direct=direct_workspace,
+                                                          sample_monitor=sample_monitor,
+                                                          can_scatter=can,
+                                                          can_monitor=can_monitor,
+                                                          can_transmission=can_transmission,
+                                                          can_direct=can_direct,
+                                                          output_settings=output_settings)
+        output_workspace = single_reduction_alg.getProperty("OutputWorkspaceMerged").value
+        output_scale_factor = single_reduction_alg.getProperty("OutScaleFactor").value
+        output_shift_factor = single_reduction_alg.getProperty("OutShiftFactor").value
+
+        tolerance = 1e-6
+        expected_shift = 0.00278452
+        expected_scale = 0.81439154
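+        # These are the scale and shift determined by the merge fit
+        # (FitModeForMerge.Both requested above) when combining the HAB and LAB data.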
+
+        self.assertTrue(abs(expected_shift - output_shift_factor) < tolerance)
+        self.assertTrue(abs(expected_scale - output_scale_factor) < tolerance)
+
+        # Compare the output of the reduction with the reference
+        reference_file_name = "SANS2D_ws_D20_reference_Merged_1D.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+    def test_that_single_reduction_evaluates_LAB_for_2D_reduction(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_can_scatter("SANS2D00034481")
+        data_builder.set_can_transmission("SANS2D00034502")
+        data_builder.set_can_direct("SANS2D00034461")
+
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+        # Set the reduction mode to LAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.LAB)
+        user_file_director.set_reduction_builder_reduction_dimensionality(ReductionDimensionality.TwoDim)
+        user_file_director.set_convert_to_q_builder_reduction_dimensionality(ReductionDimensionality.TwoDim)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event-based data but want to compare it with histogram
+        # data from the old reduction system, we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        state = user_file_director.construct()
+
+        # Load the sample workspaces
+        sample, sample_monitor, transmission_workspace, direct_workspace, can, can_monitor, \
+        can_transmission, can_direct = self._load_workspace(state)  # noqa
+
+        # Act
+        output_settings = {"OutputWorkspaceLAB": EMPTY_NAME}
+        single_reduction_alg = self._run_single_reduction(state, sample_scatter=sample,
+                                                          sample_transmission=transmission_workspace,
+                                                          sample_direct=direct_workspace,
+                                                          sample_monitor=sample_monitor,
+                                                          can_scatter=can,
+                                                          can_monitor=can_monitor,
+                                                          can_transmission=can_transmission,
+                                                          can_direct=can_direct,
+                                                          output_settings=output_settings)
+        output_workspace = single_reduction_alg.getProperty("OutputWorkspaceLAB").value
+
+        # Compare the output of the reduction with the reference
+        reference_file_name = "SANS2D_ws_D20_reference_LAB_2D.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+
+class SANSReductionRunnerTest(stresstesting.MantidStressTest):
+    def __init__(self):
+        stresstesting.MantidStressTest.__init__(self)
+        self._success = False
+
+    def runTest(self):
+        suite = unittest.TestSuite()
+        suite.addTest(unittest.makeSuite(SANSSingleReductionTest, 'test'))
+        runner = unittest.TextTestRunner()
+        res = runner.run(suite)
+        if res.wasSuccessful():
+            self._success = True
+
+    def requiredMemoryMB(self):
+        return 2000
+
+    def validate(self):
+        return self._success
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Testing/SystemTests/tests/analysis/SortHKLTest.py b/Testing/SystemTests/tests/analysis/SortHKLTest.py
index 39c2e6c91ccb466139f09f06d2c0410f829c5f2c..1791db2de5d615bd92e6e73c2eee4322e2f47724 100644
--- a/Testing/SystemTests/tests/analysis/SortHKLTest.py
+++ b/Testing/SystemTests/tests/analysis/SortHKLTest.py
@@ -5,25 +5,8 @@ from mantid.simpleapi import *
 from mantid.geometry import PointGroupFactory
 
 
-class SortHKLTest(stresstesting.MantidStressTest):
-    ''' System test for SortHKL
-
-    This system test compares some of the output of SortHKL to statistics produced
-    by running the program SORTAV [1] on the same data set.
-
-    Since SORTAV processes HKL-files and those are small, the peaks are loaded from
-    HKL-files and put into an empty PeaksWorkspace. Two additional files are read
-    for the test, the parameters for SetUB in JSON-format and some of the output from
-    the sortav.lp file which contains the output after a SORTAV-run.
-
-    This system test is there to ensure the correctness what SortHKL does against
-    the output of an established program.
-
-    [1] SORTAV: ftp://ftp.hwi.buffalo.edu/pub/Blessing/Drear/sortav.use
-        (and references therein).
-    '''
-
-    def runTest(self):
+class HKLStatisticsTestMixin(object):
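+    # Shared test data setup and helper methods, reused by both SortHKLTest below
+    # and CountReflectionsTest.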
+    def _init_test_data(self):
         self._ws = CreateSimulationWorkspace(Instrument='TOPAZ',
                                              BinParams='0,10000,20000',
                                              UnitX='TOF',
@@ -41,18 +24,6 @@ class SortHKLTest(stresstesting.MantidStressTest):
         self._template_ub = 'ub_parameters_{0}.json'
         self._template_statistics = 'statistics_{0}.txt'
 
-        self.test_SortHKLStatistics()
-
-    def test_SortHKLStatistics(self):
-        for space_group in self._space_groups:
-            ub_parameters = self._load_ub_parameters(space_group)
-            reflections = self._load_reflections(space_group, ub_parameters)
-            statistics, sorted_hkls = self._run_sort_hkl(reflections, space_group)
-            reference_statistics = self._load_reference_statistics(space_group)
-
-            self._compare_statistics(statistics, reference_statistics)
-            self._check_sorted_hkls_consistency(sorted_hkls, space_group)
-
     def _load_ub_parameters(self, space_group):
         filename = FileFinder.Instance().getFullPath(self._base_directory + self._template_ub.format(space_group))
 
@@ -83,17 +54,6 @@ class SortHKLTest(stresstesting.MantidStressTest):
 
         return actual_hkls
 
-    def _run_sort_hkl(self, reflections, space_group):
-        point_group_name = self._get_point_group(space_group).getName()
-        centering_name = self._centering_map[space_group[0]]
-
-        # pylint: disable=unused-variable
-        sorted_hkls, chi2, statistics = SortHKL(InputWorkspace=reflections,
-                                                PointGroup=point_group_name,
-                                                LatticeCentering=centering_name)
-
-        return statistics.row(0), sorted_hkls
-
     def _get_point_group(self, space_group):
         return PointGroupFactory.createPointGroup(space_group[1:].replace('_', '/'))
 
@@ -116,6 +76,50 @@ class SortHKLTest(stresstesting.MantidStressTest):
 
         return overall_statistics
 
+
+class SortHKLTest(HKLStatisticsTestMixin, stresstesting.MantidStressTest):
+    ''' System test for SortHKL
+
+    This system test compares some of the output of SortHKL to statistics produced
+    by running the program SORTAV [1] on the same data set.
+
+    Since SORTAV processes HKL-files and those are small, the peaks are loaded from
+    HKL-files and put into an empty PeaksWorkspace. Two additional files are read
+    for the test, the parameters for SetUB in JSON-format and some of the output from
+    the sortav.lp file which contains the output after a SORTAV-run.
+
+    This system test is there to ensure the correctness of what SortHKL does against
+    the output of an established program.
+
+    [1] SORTAV: ftp://ftp.hwi.buffalo.edu/pub/Blessing/Drear/sortav.use
+        (and references therein).
+    '''
+
+    def runTest(self):
+        self._init_test_data()
+        self.test_SortHKLStatistics()
+
+    def test_SortHKLStatistics(self):
+        for space_group in self._space_groups:
+            ub_parameters = self._load_ub_parameters(space_group)
+            reflections = self._load_reflections(space_group, ub_parameters)
+            statistics, sorted_hkls = self._run_sort_hkl(reflections, space_group)
+            reference_statistics = self._load_reference_statistics(space_group)
+
+            self._compare_statistics(statistics, reference_statistics)
+            self._check_sorted_hkls_consistency(sorted_hkls, space_group)
+
+    def _run_sort_hkl(self, reflections, space_group):
+        point_group_name = self._get_point_group(space_group).getName()
+        centering_name = self._centering_map[space_group[0]]
+
+        # pylint: disable=unused-variable
+        sorted_hkls, chi2, statistics = SortHKL(InputWorkspace=reflections,
+                                                PointGroup=point_group_name,
+                                                LatticeCentering=centering_name)
+
+        return statistics.row(0), sorted_hkls
+
     def _compare_statistics(self, statistics, reference_statistics):
         self.assertEquals(round(statistics['Multiplicity'], 1), round(reference_statistics['<N>'], 1))
         self.assertEquals(round(statistics['Rpim'], 2), round(100.0 * reference_statistics['Rm'], 2))
diff --git a/Testing/SystemTests/tests/analysis/SphinxWarnings.py b/Testing/SystemTests/tests/analysis/SphinxWarnings.py
index 4155b6fa1f77c579f7be004a9b511e8f7a3405e2..b31e535a9cb43930336aed6f56678d4a55cbf4b8 100644
--- a/Testing/SystemTests/tests/analysis/SphinxWarnings.py
+++ b/Testing/SystemTests/tests/analysis/SphinxWarnings.py
@@ -20,6 +20,7 @@ class SphinxWarnings(stresstesting.MantidStressTest):
                                 'Diffraction',
                                 'Events',
                                 'Examples',
+                                'ILL',
                                 'ISIS',
                                 'Inelastic',
                                 'MDAlgorithms',
diff --git a/Testing/SystemTests/tests/analysis/VesuvioCommandsTest.py b/Testing/SystemTests/tests/analysis/VesuvioCommandsTest.py
index 52185ccaef5f90129730fefdc05d5c03d6724c15..d74cb6bc5799f830f3c3d803673922fe0e800b96 100644
--- a/Testing/SystemTests/tests/analysis/VesuvioCommandsTest.py
+++ b/Testing/SystemTests/tests/analysis/VesuvioCommandsTest.py
@@ -11,7 +11,7 @@ from mantid.simpleapi import *
 from vesuvio.commands import fit_tof
 
 
-#=====================================Helper Function=================================
+# =====================================Helper Function=================================
 
 def _is_old_boost_version():
     # It appears that a difference in boost version is causing different
@@ -32,7 +32,7 @@ def _create_test_flags(background, multivariate=False):
     flags['fit_mode'] = 'spectrum'
     flags['spectra'] = '135'
     if multivariate:
-        mass1 = {'value': 1.0079, 'function': 'MultivariateGaussian', 'SigmaX':5, 'SigmaY':5, 'SigmaZ':5}
+        mass1 = {'value': 1.0079, 'function': 'MultivariateGaussian', 'SigmaX': 5, 'SigmaY': 5, 'SigmaZ': 5}
     else:
         mass1 = {'value': 1.0079, 'function': 'GramCharlier', 'width': [2, 5, 7],
                  'hermite_coeffs': [1, 0, 0], 'k_free': 0, 'sears_flag': 1}
@@ -42,7 +42,7 @@ def _create_test_flags(background, multivariate=False):
     flags['masses'] = [mass1, mass2, mass3, mass4]
     flags['intensity_constraints'] = [0, 1, 0, -4]
     if background:
-        flags['background'] = {'function': 'Polynomial', 'order':3}
+        flags['background'] = {'function': 'Polynomial', 'order': 3}
     else:
         flags['background'] = None
     flags['ip_file'] = 'Vesuvio_IP_file_test.par'
@@ -78,11 +78,11 @@ def _get_peak_height_and_index(workspace, ws_index):
 
     return peak_height, peak_bin
 
-#====================================================================================
 
+# ====================================================================================
 
-class FitSingleSpectrumNoBackgroundTest(stresstesting.MantidStressTest):
 
+class FitSingleSpectrumNoBackgroundTest(stresstesting.MantidStressTest):
     _fit_results = None
 
     def runTest(self):
@@ -96,7 +96,7 @@ class FitSingleSpectrumNoBackgroundTest(stresstesting.MantidStressTest):
 
         fitted_wsg = self._fit_results[0]
         self.assertTrue(isinstance(fitted_wsg, WorkspaceGroup))
-        self.assertEqual(2, len(fitted_wsg))
+        self.assertEqual(1, len(fitted_wsg))
 
         fitted_ws = fitted_wsg[0]
         self.assertTrue(isinstance(fitted_ws, MatrixWorkspace))
@@ -128,7 +128,8 @@ class FitSingleSpectrumNoBackgroundTest(stresstesting.MantidStressTest):
         exit_iteration = self._fit_results[3]
         self.assertTrue(isinstance(exit_iteration, int))
 
-#====================================================================================
+
+# ====================================================================================
 
 
 class FitSingleSpectrumBivariateGaussianTiesTest(stresstesting.MantidStressTest):
@@ -145,17 +146,17 @@ class FitSingleSpectrumBivariateGaussianTiesTest(stresstesting.MantidStressTest)
         self._fit_results = fit_tof(runs, flags)
 
     def validate(self):
-        #Get fit workspace
+        # Get fit workspace
         fit_params = mtd['15039-15045_params_iteration_1']
         f0_sigma_x = fit_params.readY(2)[0]
         f0_sigma_y = fit_params.readY(3)[0]
         self.assertAlmostEqual(f0_sigma_x, f0_sigma_y)
 
-#====================================================================================
 
+# ====================================================================================
 
-class SingleSpectrumBackground(stresstesting.MantidStressTest):
 
+class SingleSpectrumBackground(stresstesting.MantidStressTest):
     _fit_results = None
 
     def runTest(self):
@@ -169,7 +170,7 @@ class SingleSpectrumBackground(stresstesting.MantidStressTest):
 
         fitted_wsg = self._fit_results[0]
         self.assertTrue(isinstance(fitted_wsg, WorkspaceGroup))
-        self.assertEqual(2, len(fitted_wsg))
+        self.assertEqual(1, len(fitted_wsg))
 
         fitted_ws = fitted_wsg[0]
         self.assertTrue(isinstance(fitted_ws, MatrixWorkspace))
@@ -203,11 +204,11 @@ class SingleSpectrumBackground(stresstesting.MantidStressTest):
         exit_iteration = self._fit_results[3]
         self.assertTrue(isinstance(exit_iteration, int))
 
-#====================================================================================
 
+# ====================================================================================
 
-class BankByBankForwardSpectraNoBackground(stresstesting.MantidStressTest):
 
+class BankByBankForwardSpectraNoBackground(stresstesting.MantidStressTest):
     _fit_results = None
 
     def runTest(self):
@@ -222,32 +223,26 @@ class BankByBankForwardSpectraNoBackground(stresstesting.MantidStressTest):
         self.assertEquals(4, len(self._fit_results))
 
         fitted_banks = self._fit_results[0]
-        self.assertTrue(isinstance(fitted_banks, list))
+        self.assertTrue(isinstance(fitted_banks, WorkspaceGroup))
         self.assertEqual(8, len(fitted_banks))
 
         bank1 = fitted_banks[0]
-        self.assertTrue(isinstance(bank1, WorkspaceGroup))
+        self.assertTrue(isinstance(bank1, MatrixWorkspace))
 
-        bank1_data = bank1[0]
-        self.assertTrue(isinstance(bank1_data, MatrixWorkspace))
+        self.assertAlmostEqual(50.0, bank1.readX(0)[0])
+        self.assertAlmostEqual(562.0, bank1.readX(0)[-1])
 
-        self.assertAlmostEqual(50.0, bank1_data.readX(0)[0])
-        self.assertAlmostEqual(562.0, bank1_data.readX(0)[-1])
+        _equal_within_tolerance(self, 8.23840378769e-05, bank1.readY(1)[0])
+        _equal_within_tolerance(self, 0.000556695665501, bank1.readY(1)[-1])
 
-        _equal_within_tolerance(self, 8.23840378769e-05, bank1_data.readY(1)[0])
-        _equal_within_tolerance(self, 0.000556695665501, bank1_data.readY(1)[-1])
+        bank8 = fitted_banks[7]
+        self.assertTrue(isinstance(bank8, MatrixWorkspace))
 
-        bank8 = fitted_banks[-1]
-        self.assertTrue(isinstance(bank8, WorkspaceGroup))
+        self.assertAlmostEqual(50.0, bank8.readX(0)[0])
+        self.assertAlmostEqual(562.0, bank8.readX(0)[-1])
 
-        bank8_data = bank8[0]
-        self.assertTrue(isinstance(bank8_data, MatrixWorkspace))
-
-        self.assertAlmostEqual(50.0, bank8_data.readX(0)[0])
-        self.assertAlmostEqual(562.0, bank8_data.readX(0)[-1])
-
-        _equal_within_tolerance(self, 0.00025454613205, bank8_data.readY(1)[0])
-        _equal_within_tolerance(self, 0.00050412575393, bank8_data.readY(1)[-1])
+        _equal_within_tolerance(self, 0.00025454613205, bank8.readY(1)[0])
+        _equal_within_tolerance(self, 0.00050412575393, bank8.readY(1)[-1])
 
         chisq_values = self._fit_results[2]
         self.assertTrue(isinstance(chisq_values, list))
@@ -256,11 +251,11 @@ class BankByBankForwardSpectraNoBackground(stresstesting.MantidStressTest):
         exit_iteration = self._fit_results[3]
         self.assertTrue(isinstance(exit_iteration, int))
 
-#====================================================================================
 
+# ====================================================================================
 
-class SpectraBySpectraForwardSpectraNoBackground(stresstesting.MantidStressTest):
 
+class SpectraBySpectraForwardSpectraNoBackground(stresstesting.MantidStressTest):
     _fit_results = None
 
     def runTest(self):
@@ -275,32 +270,26 @@ class SpectraBySpectraForwardSpectraNoBackground(stresstesting.MantidStressTest)
         self.assertEquals(4, len(self._fit_results))
 
         fitted_spec = self._fit_results[0]
-        self.assertTrue(isinstance(fitted_spec, list))
+        self.assertTrue(isinstance(fitted_spec, WorkspaceGroup))
         self.assertEqual(2, len(fitted_spec))
 
         spec143 = fitted_spec[0]
-        self.assertTrue(isinstance(spec143, WorkspaceGroup))
-
-        spec143_data = spec143[0]
-        self.assertTrue(isinstance(spec143_data, MatrixWorkspace))
-
-        self.assertAlmostEqual(50.0, spec143_data.readX(0)[0])
-        self.assertAlmostEqual(562.0, spec143_data.readX(0)[-1])
+        self.assertTrue(isinstance(spec143, MatrixWorkspace))
 
-        _equal_within_tolerance(self, 2.27289862507e-06, spec143_data.readY(1)[0])
-        _equal_within_tolerance(self, 3.49287467421e-05, spec143_data.readY(1)[-1])
+        self.assertAlmostEqual(50.0, spec143.readX(0)[0])
+        self.assertAlmostEqual(562.0, spec143.readX(0)[-1])
 
-        spec144 = fitted_spec[-1]
-        self.assertTrue(isinstance(spec144, WorkspaceGroup))
+        _equal_within_tolerance(self, 2.27289862507e-06, spec143.readY(1)[0])
+        _equal_within_tolerance(self, 3.49287467421e-05, spec143.readY(1)[-1])
 
-        spec144_data = spec144[0]
-        self.assertTrue(isinstance(spec144_data, MatrixWorkspace))
+        spec144 = fitted_spec[1]
+        self.assertTrue(isinstance(spec144, MatrixWorkspace))
 
-        self.assertAlmostEqual(50.0, spec144_data.readX(0)[0])
-        self.assertAlmostEqual(562.0, spec144_data.readX(0)[-1])
+        self.assertAlmostEqual(50.0, spec144.readX(0)[0])
+        self.assertAlmostEqual(562.0, spec144.readX(0)[-1])
 
-        _equal_within_tolerance(self, 5.9811662524e-06, spec144_data.readY(1)[0])
-        _equal_within_tolerance(self, 4.7479831769e-05, spec144_data.readY(1)[-1])
+        _equal_within_tolerance(self, 5.9811662524e-06, spec144.readY(1)[0])
+        _equal_within_tolerance(self, 4.7479831769e-05, spec144.readY(1)[-1])
 
         chisq_values = self._fit_results[2]
         self.assertTrue(isinstance(chisq_values, list))
@@ -309,4 +298,4 @@ class SpectraBySpectraForwardSpectraNoBackground(stresstesting.MantidStressTest)
         exit_iteration = self._fit_results[3]
         self.assertTrue(isinstance(exit_iteration, int))
 
-#====================================================================================
+# ====================================================================================
diff --git a/Testing/SystemTests/tests/analysis/reference/PG3_9829_sum_reference.gsa.md5 b/Testing/SystemTests/tests/analysis/reference/PG3_9829_sum_reference.gsa.md5
index 6379b10f439cf65a48db6f156b37e35b6d6848b0..34847a850c8beed54bf903bf86197bdb8461a35b 100644
--- a/Testing/SystemTests/tests/analysis/reference/PG3_9829_sum_reference.gsa.md5
+++ b/Testing/SystemTests/tests/analysis/reference/PG3_9829_sum_reference.gsa.md5
@@ -1 +1 @@
-7873b53ec5b94b039b2fcb040d513cd4
+90f6f9ae594a0686a27830b2cc5fb029
diff --git a/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..0cbd2798cb6956cfc4488571a970647ad34ba81a
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference.nxs.md5
@@ -0,0 +1 @@
+bc8b84337511fe5f442717dedf98a21f
diff --git a/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_HAB_1D.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_HAB_1D.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..1cdf7b85235185e6532d3f38d8a2e4db90c54755
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_HAB_1D.nxs.md5
@@ -0,0 +1 @@
+bc37abb12a322758a7f3d82d5428f7ae
diff --git a/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_1D.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_1D.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..4bc3d75385c003e18cf34ae78294160fa1fbbb34
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_1D.nxs.md5
@@ -0,0 +1 @@
+d1495cecef8cfce8a98d149e1083bcfd
diff --git a/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_2D.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_2D.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..4338ef6a163ad6166f0eaa29502b4822116dcfe5
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_2D.nxs.md5
@@ -0,0 +1 @@
+4f4e0bcc36203a42cb1f4f0f6dd737c0
diff --git a/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_Merged_1D.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_Merged_1D.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..5db5641783926ed3d170363b9853befe070b728d
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_Merged_1D.nxs.md5
@@ -0,0 +1 @@
+b12a20b840ddabe017c23f17733a0c75
diff --git a/Testing/SystemTests/tests/analysis/reference/VesuvioFittingTest.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/VesuvioFittingTest.nxs.md5
index d578509ceb8b34845ce16e9c0d50b0dd923cfcb6..97e9140a596f50dab50aca9dce3dc5457c8f8e15 100644
--- a/Testing/SystemTests/tests/analysis/reference/VesuvioFittingTest.nxs.md5
+++ b/Testing/SystemTests/tests/analysis/reference/VesuvioFittingTest.nxs.md5
@@ -1 +1 @@
-f87942c1b37e17f184c27274137d0953
+a12119b70164b4e192b48488f85bcf75
diff --git a/Vates/VatesSimpleGui/ViewWidgets/CMakeLists.txt b/Vates/VatesSimpleGui/ViewWidgets/CMakeLists.txt
index 6ba725e55cf06bd3d13fe5f7be4cbcbed03cd57b..3192e10142f42b684115323b00376ef42033100b 100644
--- a/Vates/VatesSimpleGui/ViewWidgets/CMakeLists.txt
+++ b/Vates/VatesSimpleGui/ViewWidgets/CMakeLists.txt
@@ -20,6 +20,7 @@ set( INCLUDE_FILES
   inc/MantidVatesSimpleGuiViewWidgets/PeaksTabWidget.h
   inc/MantidVatesSimpleGuiViewWidgets/SaveScreenshotReaction.h
   inc/MantidVatesSimpleGuiViewWidgets/RebinnedSourcesManager.h
+  inc/MantidVatesSimpleGuiViewWidgets/VisibleAxesColor.h
   inc/MantidVatesSimpleGuiViewWidgets/StandardView.h
   inc/MantidVatesSimpleGuiViewWidgets/SplatterPlotView.h
   inc/MantidVatesSimpleGuiViewWidgets/ThreesliceView.h
@@ -46,6 +47,7 @@ set( SOURCE_FILES
   src/pqCameraReactionNonOrthogonalAxes.cpp
   src/SaveScreenshotReaction.cpp
   src/RebinnedSourcesManager.cpp
+  src/VisibleAxesColor.cpp
   src/StandardView.cpp
   src/SplatterPlotView.cpp
   src/ThreesliceView.cpp
diff --git a/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/MdViewerWidget.h b/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/MdViewerWidget.h
index ce9f57bf50f95d8e674843ca6e215a6c73571966..11bf5451ec8facaad66ca4b66e3200145c4cba14 100644
--- a/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/MdViewerWidget.h
+++ b/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/MdViewerWidget.h
@@ -11,9 +11,10 @@
 #include "MantidVatesSimpleGuiViewWidgets/WidgetDllOption.h"
 #include "MantidVatesAPI/ColorScaleGuard.h"
 
-#include "boost/shared_ptr.hpp"
+#include "vtkSmartPointer.h"
 
-#include <vtkSmartPointer.h>
+#include "boost/optional.hpp"
+#include "boost/shared_ptr.hpp"
 
 // forward declaration of ParaQ classes
 class pqApplicationSettingsReaction;
@@ -102,7 +103,7 @@ public:
   std::string getWindowType() override;
 
 public slots:
-  /// Seet MantidQt::API::VatesViewerInterface
+  /// See MantidQt::API::VatesViewerInterface
   void shutdown() override;
 
 protected slots:
@@ -148,6 +149,7 @@ protected:
 
 private:
   Q_DISABLE_COPY(MdViewerWidget)
+  boost::optional<unsigned long> m_axesTag;
   QString m_widgetName;
 
   ViewBase *currentView; ///< Holder for the current (shown) view
@@ -255,6 +257,8 @@ private:
                                         QStringList &wsNames);
   /// Set up the default color for the background of the view.
   void setColorForBackground();
+  /// Sets axes colors that are visible against the background.
+  void setVisibleAxesColors();
   /// Set the color map
   void setColorMap();
   /// Render the original workspace
diff --git a/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/ViewBase.h b/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/ViewBase.h
index 1ea71dbb53bd7c226d36524c7778f15ab3953d3f..f01c683c2e1b6424a1bacd7c45f40444f83604e8 100644
--- a/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/ViewBase.h
+++ b/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/ViewBase.h
@@ -1,14 +1,15 @@
 #ifndef VIEWBASE_H_
 #define VIEWBASE_H_
 
+#include "MantidVatesAPI/ColorScaleGuard.h"
+#include "MantidVatesSimpleGuiViewWidgets/VisibleAxesColor.h"
+#include "MantidVatesSimpleGuiQtWidgets/ModeControlWidget.h"
 #include "MantidVatesSimpleGuiViewWidgets/BackgroundRgbProvider.h"
 #include "MantidVatesSimpleGuiViewWidgets/ColorUpdater.h"
 #include "MantidVatesSimpleGuiViewWidgets/WidgetDllOption.h"
-#include "MantidVatesSimpleGuiQtWidgets/ModeControlWidget.h"
-#include "MantidVatesAPI/ColorScaleGuard.h"
+#include "vtk_jsoncpp.h"
 #include <QPointer>
 #include <QWidget>
-#include "vtk_jsoncpp.h"
 
 class pqDataRepresentation;
 class pqObjectBuilder;
@@ -123,6 +124,8 @@ public:
   virtual void setColorForBackground(bool useCurrentColorSettings);
   /// Sets the splatterplot button to the desired visibility.
   virtual void setSplatterplot(bool visibility);
+  /// Sets axes colors that contrast with the background.
+  virtual unsigned long setVisibleAxesColors();
   /// Initializes the settings of the color scale
   virtual void initializeColorScale();
   /// Sets the standard veiw button to the desired visibility.
@@ -265,6 +268,7 @@ private:
   BackgroundRgbProvider backgroundRgbProvider; /// < Holds the manager for
                                                /// background color related
                                                /// tasks.
+  VisibleAxesColor m_visibleAxesColor;
   RebinnedSourcesManager *m_rebinnedSourcesManager;
   Json::Value m_currentColorMapModel;
 
diff --git a/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/VisibleAxesColor.h b/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/VisibleAxesColor.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d190ae3f7e8d897bf12e890502a43ca9cbdf83e
--- /dev/null
+++ b/Vates/VatesSimpleGui/ViewWidgets/inc/MantidVatesSimpleGuiViewWidgets/VisibleAxesColor.h
@@ -0,0 +1,55 @@
+#ifndef MANTID_VISIBLEAXESCOLOR_H_
+#define MANTID_VISIBLEAXESCOLOR_H_
+#include "MantidVatesSimpleGuiViewWidgets/WidgetDllOption.h"
+#include "pqRenderView.h"
+
+namespace Mantid {
+namespace Vates {
+namespace SimpleGui {
+
+/**
+ *
+
+  Copyright &copy; 2016 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+
+class EXPORT_OPT_MANTIDVATES_SIMPLEGUI_VIEWWIDGETS VisibleAxesColor {
+public:
+  /**
+   * Choose a color that contrasts with the view's background, apply it to the
+   * orientation axes labels, grid axes and scalar bars, and observe future
+   * background changes.
+   * @param view The view which has its color set.
+   */
+  unsigned long setAndObserveAxesColor(pqView *view);
+  void setOrientationAxesLabelColor(pqView *view,
+                                    const std::array<double, 3> &color);
+  void setGridAxesColor(pqView *view, const std::array<double, 3> &color);
+  void setScalarBarColor(pqView *view, const std::array<double, 3> &color);
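+  /**
+   * Install an observer on the view's background color property so the axes
+   * colors are refreshed whenever the background changes.
+   * @return the tag of the installed observer
+   */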
+  unsigned long observe(pqView *view);
+
+private:
+  void backgroundColorChangeCallback(vtkObject *caller, unsigned long, void *);
+};
+}
+}
+}
+
+#endif // MANTID_VISIBLEAXESCOLOR_H_
diff --git a/Vates/VatesSimpleGui/ViewWidgets/src/BackgroundRgbProvider.cpp b/Vates/VatesSimpleGui/ViewWidgets/src/BackgroundRgbProvider.cpp
index 37a628bac26eb4d5e588f35d67482c6cda623fcf..7630660855e031e86f62bdd24338b46067e2fd40 100644
--- a/Vates/VatesSimpleGui/ViewWidgets/src/BackgroundRgbProvider.cpp
+++ b/Vates/VatesSimpleGui/ViewWidgets/src/BackgroundRgbProvider.cpp
@@ -1,23 +1,18 @@
 #include "MantidVatesSimpleGuiViewWidgets/BackgroundRgbProvider.h"
 #include "MantidQtAPI/MdSettings.h"
 #include "MantidKernel/Logger.h"
+
+#include <array>
+#include <cmath>
 #include <vector>
 
-// Have to deal with ParaView warnings and Intel compiler the hard way.
-#if defined(__INTEL_COMPILER)
-#pragma warning disable 1170
-#endif
 #include <pqRenderView.h>
+#include <vtkCallbackCommand.h>
+#include <vtkCommand.h>
 #include <vtkSMDoubleVectorProperty.h>
 #include <vtkSMViewProxy.h>
-#include <vtkCommand.h>
-#include <vtkCallbackCommand.h>
 #include <vtkSmartPointer.h>
 
-#if defined(__INTEL_COMPILER)
-#pragma warning enable 1170
-#endif
-
 namespace Mantid {
 namespace Vates {
 namespace SimpleGui {
diff --git a/Vates/VatesSimpleGui/ViewWidgets/src/ColorUpdater.cpp b/Vates/VatesSimpleGui/ViewWidgets/src/ColorUpdater.cpp
index 702ceaec9b29acf637fc9b1c5b4890cc9c906137..c7c709e8d49287d30fd5318df3a7b4b21776586b 100644
--- a/Vates/VatesSimpleGui/ViewWidgets/src/ColorUpdater.cpp
+++ b/Vates/VatesSimpleGui/ViewWidgets/src/ColorUpdater.cpp
@@ -17,8 +17,8 @@
 #include <pqServerManagerModel.h>
 #include <pqSMAdaptor.h>
 
-#include <vtkCallbackCommand.h>
 #include "vtk_jsoncpp.h"
+#include <vtkCallbackCommand.h>
 #include <vtkSMDoubleVectorProperty.h>
 #include <vtkSMIntVectorProperty.h>
 #include <vtkSMProxy.h>
diff --git a/Vates/VatesSimpleGui/ViewWidgets/src/MdViewerWidget.cpp b/Vates/VatesSimpleGui/ViewWidgets/src/MdViewerWidget.cpp
index 59125fa6f3353b6f23ebcb95d6183e871d4a77a3..8d01c3a66c622caea7be75c824bae8f4654d8bc1 100644
--- a/Vates/VatesSimpleGui/ViewWidgets/src/MdViewerWidget.cpp
+++ b/Vates/VatesSimpleGui/ViewWidgets/src/MdViewerWidget.cpp
@@ -41,7 +41,6 @@
 #if defined(__INTEL_COMPILER)
 #pragma warning disable 1170
 #endif
-#include <pqApplicationCore.h>
 #include <pqActiveObjects.h>
 #include <pqAnimationManager.h>
 #include <pqAnimationScene.h>
@@ -51,31 +50,32 @@
 #include <pqDeleteReaction.h>
 #include <pqLoadDataReaction.h>
 #include <pqObjectBuilder.h>
+#include <pqPVApplicationCore.h>
 #include <pqParaViewBehaviors.h>
-#include <pqPipelineSource.h>
 #include <pqPipelineFilter.h>
-#include <pqPVApplicationCore.h>
+#include <pqPipelineRepresentation.h>
+#include <pqPipelineSource.h>
 #include <pqRenderView.h>
-#include <pqSettings.h>
 #include <pqServer.h>
 #include <pqServerManagerModel.h>
+#include <pqSettings.h>
 #include <pqStatusBar.h>
 #include <vtkCamera.h>
+#include <vtkCommand.h>
 #include <vtkMathTextUtilities.h>
 #include <vtkPVOrthographicSliceView.h>
 #include <vtkPVXMLElement.h>
 #include <vtkPVXMLParser.h>
 #include <vtkSMDoubleVectorProperty.h>
 #include <vtkSMPropertyHelper.h>
-#include <vtkSMProxyManager.h>
 #include <vtkSMProxy.h>
+#include <vtkSMProxyManager.h>
 #include <vtkSMReaderFactory.h>
 #include <vtkSMRenderViewProxy.h>
 #include <vtkSMSessionProxyManager.h>
 #include <vtkSMSourceProxy.h>
 #include <vtkSMViewProxy.h>
 #include <vtksys/SystemTools.hxx>
-#include <pqPipelineRepresentation.h>
 
 // Used for plugin mode
 #include <pqAlwaysConnectedBehavior.h>
@@ -693,6 +693,7 @@ void MdViewerWidget::renderWorkspace(QString workspaceName, int workspaceType,
   //             after the window is started again.
   if (this->currentView->getNumSources() == 0) {
     this->setColorForBackground();
+    this->setVisibleAxesColors();
     this->setColorMap();
 
     if (VatesViewerInterface::PEAKS != workspaceType) {
@@ -713,7 +714,7 @@ void MdViewerWidget::renderWorkspace(QString workspaceName, int workspaceType,
     this->useCurrentColorSettings = true;
   }
 
-  QString sourcePlugin = "";
+  QString sourcePlugin;
   if (VatesViewerInterface::PEAKS == workspaceType) {
     sourcePlugin = "Peaks Source";
   } else if (VatesViewerInterface::MDHW == workspaceType) {
@@ -1139,6 +1140,7 @@ std::string MdViewerWidget::getWindowType() { return "VSIWindow"; }
 void MdViewerWidget::renderAndFinalSetup() {
   Mantid::VATES::ColorScaleLockGuard colorScaleLockGuard(&m_colorScaleLock);
   this->setColorForBackground();
+  this->setVisibleAxesColors();
   this->currentView->render();
   this->setColorMap();
   this->currentView->setColorsForView(this->ui.colorSelectionWidget);
@@ -1156,6 +1158,20 @@ void MdViewerWidget::setColorForBackground() {
   this->currentView->setColorForBackground(this->useCurrentColorSettings);
 }
 
+void MdViewerWidget::setVisibleAxesColors() {
+  if (mdSettings.getUserSettingAutoColorAxes()) {
+    // Only add the observer once.
+    if (!m_axesTag) {
+      m_axesTag = this->currentView->setVisibleAxesColors();
+    }
+  } else if (m_axesTag) {
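+    // Automatic axes coloring is disabled: detach the observer so the axes
+    // colors no longer track background changes.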
+    this->currentView->getView()
+        ->getViewProxy()
+        ->GetProperty("Background")
+        ->RemoveObserver(*m_axesTag);
+  }
+}
+
 /**
  * This function is used during the post-apply process of particular pipeline
  * filters to check for updates to anything that relies on information from the
@@ -1233,6 +1249,7 @@ void MdViewerWidget::switchViews(ModeControlWidget::Views v) {
   restoreViewState(this->currentView, v);
   this->currentView->setColorsForView(this->ui.colorSelectionWidget);
   this->setColorForBackground();
+  this->setVisibleAxesColors();
 
   this->currentView->checkViewOnSwitch();
   this->updateAppState();
diff --git a/Vates/VatesSimpleGui/ViewWidgets/src/ViewBase.cpp b/Vates/VatesSimpleGui/ViewWidgets/src/ViewBase.cpp
index 3511cb790281379e4cb7df817b7641d3a4140862..70ca26a40eb0d8d1d9a3e8ea15bc67bba1e4a6ee 100644
--- a/Vates/VatesSimpleGui/ViewWidgets/src/ViewBase.cpp
+++ b/Vates/VatesSimpleGui/ViewWidgets/src/ViewBase.cpp
@@ -10,10 +10,8 @@
 #include "MantidVatesAPI/BoxInfo.h"
 #include "MantidKernel/WarningSuppressions.h"
 #include "MantidKernel/make_unique.h"
-#if defined(__INTEL_COMPILER)
-#pragma warning disable 1170
-#endif
 
+#include <QVTKWidget.h>
 #include <pqActiveObjects.h>
 #include <pqAnimationManager.h>
 #include <pqAnimationScene.h>
@@ -21,19 +19,18 @@
 #include <pqDataRepresentation.h>
 #include <pqDeleteReaction.h>
 #include <pqObjectBuilder.h>
+#include <pqPVApplicationCore.h>
 #include <pqPipelineFilter.h>
 #include <pqPipelineRepresentation.h>
 #include <pqPipelineSource.h>
-#include <pqPVApplicationCore.h>
 #include <pqRenderView.h>
 #include <pqScalarsToColors.h>
 #include <pqServer.h>
 #include <pqServerManagerModel.h>
 #include <pqView.h>
-#include <QVTKWidget.h>
-#include <vtkRendererCollection.h>
 #include <vtkRenderWindow.h>
 #include <vtkRenderWindowInteractor.h>
+#include <vtkRendererCollection.h>
 #include <vtkSMDoubleVectorProperty.h>
 #include <vtkSMPropertyHelper.h>
 #include <vtkSMPropertyIterator.h>
@@ -43,10 +40,6 @@
 
 #include <pqMultiSliceAxisWidget.h>
 
-#if defined(__INTEL_COMPILER)
-#pragma warning enable 1170
-#endif
-
 #include <QHBoxLayout>
 #include <QPointer>
 #include <QThread>
@@ -736,6 +729,14 @@ void ViewBase::setColorForBackground(bool useCurrentColorSettings) {
   backgroundRgbProvider.observe(this->getView());
 }
 
+/**
+ * This function sets axes colors that are visible against the current
+ * background and connects a tracker for changes of the background color by
+ * the user.
+ */
+unsigned long ViewBase::setVisibleAxesColors() {
+  return this->m_visibleAxesColor.setAndObserveAxesColor(this->getView());
+}
+
 /**
  * Set color scale lock
  * @param colorScaleLock: the color scale lock
diff --git a/Vates/VatesSimpleGui/ViewWidgets/src/VisibleAxesColor.cpp b/Vates/VatesSimpleGui/ViewWidgets/src/VisibleAxesColor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..868633db1348078ad542ae5c6e1a80e1e50ef1ae
--- /dev/null
+++ b/Vates/VatesSimpleGui/ViewWidgets/src/VisibleAxesColor.cpp
@@ -0,0 +1,127 @@
+#include "MantidVatesSimpleGuiViewWidgets/VisibleAxesColor.h"
+
+#include <array>
+#include <vector>
+
+#include "pqActiveObjects.h"
+#include "pqApplicationCore.h"
+#include "pqServerManagerModel.h"
+#include "vtkCommand.h"
+#include "vtkSMDoubleVectorProperty.h"
+#include "vtkSMProperty.h"
+#include "vtkSMPropertyHelper.h"
+#include "vtkSMTransferFunctionProxy.h"
+#include "vtkSMViewProxy.h"
+
+namespace Mantid {
+namespace Vates {
+namespace SimpleGui {
+
+namespace {
+
+void safeSetProperty(vtkSMProxy *gridAxis,
+                     std::initializer_list<const char *> pnames,
+                     const std::array<double, 3> &value) {
+  if (gridAxis) {
+    for (auto pname : pnames) {
+      vtkSMProperty *prop = gridAxis->GetProperty(pname);
+      if (prop) {
+        vtkSMPropertyHelper helper(prop);
+        helper.Set(value.data(), 3);
+        gridAxis->UpdateProperty(pname);
+      }
+    }
+  }
+}
+
+std::array<double, 3> getContrastingColor(const std::vector<double> &color) {
+  // Approximate formula for color brightness
+  // https://www.w3.org/TR/AERT#color-contrast
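+  // E.g. a white background (1,1,1) has brightness 1, so criteria = 0 and
+  // black axes are chosen; a black background gives criteria = 1 -> white axes.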
+
+  double criteria =
+      1. - (0.299 * color[0] + 0.587 * color[1] + 0.114 * color[2]);
+
+  if (criteria < 0.5)
+    return {{0., 0., 0.}};
+  else
+    return {{1., 1., 1.}};
+}
+
+std::vector<double> getBackgroundColor(pqView *view) {
+  vtkSMProperty *prop = view->getProxy()->GetProperty("Background");
+  return vtkSMPropertyHelper(prop).GetDoubleArray();
+}
+}
+
+unsigned long VisibleAxesColor::setAndObserveAxesColor(pqView *view) {
+  auto color = getContrastingColor(getBackgroundColor(view));
+  this->setOrientationAxesLabelColor(view, color);
+  this->setGridAxesColor(view, color);
+  this->setScalarBarColor(view, color);
+  return this->observe(view);
+}
+
+void VisibleAxesColor::setOrientationAxesLabelColor(
+    pqView *view, const std::array<double, 3> &color) {
+  safeSetProperty(view->getProxy(), {"OrientationAxesLabelColor"}, color);
+}
+
+void VisibleAxesColor::setGridAxesColor(pqView *view,
+                                        const std::array<double, 3> &color) {
+  vtkSMProxy *gridAxes3DActor =
+      vtkSMPropertyHelper(view->getProxy(), "AxesGrid", true).GetAsProxy();
+  safeSetProperty(gridAxes3DActor,
+                  {"XTitleColor", "YTitleColor", "ZTitleColor", "XLabelColor",
+                   "YLabelColor", "ZLabelColor", "GridColor"},
+                  color);
+}
+
+void VisibleAxesColor::setScalarBarColor(pqView *view,
+                                         const std::array<double, 3> &color) {
+  // Update for all sources and all reps
+  pqServer *server = pqActiveObjects::instance().activeServer();
+  pqServerManagerModel *smModel =
+      pqApplicationCore::instance()->getServerManagerModel();
+
+  const QList<pqPipelineSource *> sources =
+      smModel->findItems<pqPipelineSource *>(server);
+  // For all sources
+  for (pqPipelineSource *source : sources) {
+    const QList<pqDataRepresentation *> reps = source->getRepresentations(view);
+    // For all representations
+    for (pqDataRepresentation *rep : reps) {
+      vtkSMProxy *ScalarBarProxy =
+          vtkSMTransferFunctionProxy::FindScalarBarRepresentation(
+              rep->getLookupTableProxy(), view->getProxy());
+      safeSetProperty(ScalarBarProxy, {"TitleColor", "LabelColor"}, color);
+    }
+  }
+}
+
+unsigned long VisibleAxesColor::observe(pqView *view) {
+  return view->getViewProxy()
+      ->GetProperty("Background")
+      ->AddObserver(vtkCommand::ModifiedEvent, this,
+                    &VisibleAxesColor::backgroundColorChangeCallback);
+}
+
+void VisibleAxesColor::backgroundColorChangeCallback(vtkObject *caller,
+                                                     unsigned long, void *) {
+  vtkSMDoubleVectorProperty *background =
+      vtkSMDoubleVectorProperty::SafeDownCast(caller);
+  int numberOfElements = background->GetNumberOfElements();
+  double *elements = background->GetElements();
+  std::vector<double> backgroundColor(elements, elements + numberOfElements);
+
+  auto color = getContrastingColor(backgroundColor);
+
+  pqView *view = pqActiveObjects::instance().activeView();
+
+  this->setOrientationAxesLabelColor(view, color);
+  this->setGridAxesColor(view, color);
+  this->setScalarBarColor(view, color);
+}
+
+} // SimpleGui
+} // Vates
+} // Mantid
diff --git a/docs/source/algorithms/CountReflections-v1.rst b/docs/source/algorithms/CountReflections-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a04197df2de978780c3669e55a7429f93af6f174
--- /dev/null
+++ b/docs/source/algorithms/CountReflections-v1.rst
@@ -0,0 +1,89 @@
+
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm computes some crystallographic data set quality indicators that are based
+on counting reflections according to their Miller indices HKL. Intensity information is not
+required for these indicators, so that the algorithm can also be used with predicted data
+(for example generated by :ref:`algm-PredictPeaks`).
+
+According to the specified lattice centering and the resolution boundaries, a set of
+theoretically measurable reflections is generated. How the reflections are mapped to
+a set of :math:`N_{theor.}` unique reflections depends on the supplied point group. Then the
+:math:`N_{observed}` actually observed peaks from the input workspace are assigned to their
+respective unique reflection, yielding :math:`N_{unique}` observed unique reflections.
+
+From this assignment it is possible to calculate the following indicators:
+
+  * Unique reflections: :math:`N_{unique}`
+  * Completeness: :math:`\frac{N_{unique}}{N_{theor.}}`
+  * Redundancy: :math:`\frac{N_{observed}}{N_{unique}}`
+  * Multiply observed reflections: :math:`\frac{N_{unique} | N^{hkl}_{observed} > 1}{N_{unique}}`
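+
+For example, with the counts from the usage example below, 434 observed peaks are assigned
+to 358 observed unique reflections, giving a redundancy of :math:`\frac{434}{358} \approx 1.21`.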
+
+Furthermore, the algorithm optionally produces a list of missing reflections. In this list,
+each missing unique reflection is expanded to all symmetry equivalents according to the point
+group. For example, if the reflection family :math:`\left{001\right}` was missing
+with point group :math:`\bar{1}`, the list would contain :math:`(001)` and :math:`(00\bar{1})`.
+
+The reason for expanding the unique reflections is to make the list more useful as an input
+to :ref:`algm-PredictPeaks` again.
+
+.. note::
+
+    This algorithm has some overlap with :ref:`algm-SortHKL`, which computes some of the same
+    indicators but additionally evaluates intensity information. SortHKL only works with
+    peaks that carry intensity data, while this algorithm also works without intensities.
+
+Usage
+-----
+
+The usage example uses the same data as the usage test in :ref:`algm-SortHKL`, but produces slightly different
+numbers, because some intensities in the input file are 0 and those reflections are ignored by :ref:`algm-SortHKL`:
+
+.. testcode:: CountReflectionsExample
+
+    # Load example peak data and find cell
+    peaks = LoadIsawPeaks(Filename=r'Peaks5637.integrate')
+
+    FindUBUsingFFT(peaks, MinD=0.25, MaxD=10, Tolerance=0.2)
+    SelectCellWithForm(peaks, FormNumber=9, Apply=True, Tolerance=0.15)
+    OptimizeLatticeForCellType(peaks, CellType='Hexagonal', Apply=True, Tolerance=0.2)
+
+    # Run the CountReflections algorithm
+    unique, completeness, redundancy, multiple = CountReflections(peaks, PointGroup='-3m1',
+                                                                  LatticeCentering='Robv', MinDSpacing=0.205,
+                                                                  MaxDSpacing=2.08, MissingReflectionsWorkspace='')
+
+    print 'Data set statistics:'
+    print '             Peaks: {0}'.format(peaks.getNumberPeaks())
+    print '            Unique: {0}'.format(unique)
+    print '      Completeness: {0}%'.format(round(completeness * 100, 2))
+    print '        Redundancy: {0}'.format(round(redundancy, 2))
+    print ' Multiply observed: {0}%'.format(round(multiple*100, 2))
+
+Output:
+
+.. testoutput:: CountReflectionsExample
+
+    Data set statistics:
+                 Peaks: 434
+                Unique: 358
+          Completeness: 9.57%
+            Redundancy: 1.21
+     Multiply observed: 20.67%
+
+The resulting completeness is slightly higher than in the SortHKL case, but for actual statistics it might be
+better to remove the zero intensity peaks from the workspace prior to running the algorithm.
+
+.. categories::
+
+.. sourcelink::
+
diff --git a/docs/source/algorithms/DeltaPDF3D-v1.rst b/docs/source/algorithms/DeltaPDF3D-v1.rst
index 51838b354f117a231a34cc36c311bb7c217b33ad..0d0e00149ea3b3f9ce4c4663a5afde1dc05ea036 100644
--- a/docs/source/algorithms/DeltaPDF3D-v1.rst
+++ b/docs/source/algorithms/DeltaPDF3D-v1.rst
@@ -25,7 +25,11 @@ The input workspace must be a :ref:`MDHistoWorkspace
 The convolution option requires `astropy
 <http://docs.astropy.org/en/stable/index.html>`_ to be installed as it
 uses `astropy.convolution
-<http://docs.astropy.org/en/stable/convolution/>`_.
+<http://docs.astropy.org/en/stable/convolution/>`_. The convolution
+can be very slow for large workspaces: the algorithm attempts to use
+astropy.convolution.convolve_fft (which is fast but only works for
+small workspaces) and falls back to astropy.convolution.convolve
+(which is slow) if the workspace is too large.
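+
+One way such a fallback might look is sketched below; this is illustrative only (the
+algorithm itself may instead decide based on the workspace size up front), and the
+``kernel`` argument stands in for whatever convolution kernel is actually used:
+
+.. code-block:: python
+
+   from astropy.convolution import convolve, convolve_fft
+
+   def smooth(signal, kernel):
+       # Prefer the fast FFT-based convolution; fall back to the direct (and
+       # much slower) convolution if the FFT version runs out of memory.
+       try:
+           return convolve_fft(signal, kernel)
+       except MemoryError:
+           return convolve(signal, kernel)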
 
 References
 ----------
@@ -98,7 +102,7 @@ The IntermediateWorkspace shows the changes to the input workspace.
 .. testcode:: fft2
 
    DeltaPDF3D(InputWorkspace='DeltaPDF3D_MDH',OutputWorkspace='fft2',IntermediateWorkspace='int2',
-              RemoveReflections=True,Size=0.4,Convolution=False)
+              RemoveReflections=True,Size=0.3,Convolution=False)
    print "The value at [1,0,0] is " + str(mtd['fft2'].signalAt(1866))
    print "The value at [0,1,0] is " + str(mtd['fft2'].signalAt(2226))
 
@@ -123,7 +127,7 @@ The IntermediateWorkspace shows the changes to the input workspace.
 .. testcode:: fft3
 
    DeltaPDF3D(InputWorkspace='DeltaPDF3D_MDH',OutputWorkspace='fft3',IntermediateWorkspace='int3',
-              RemoveReflections=True,Size=0.4,CropSphere=True,SphereMax=3,Convolution=False)
+              RemoveReflections=True,Size=0.3,CropSphere=True,SphereMax=3,Convolution=False)
    print "The value at [1,0,0] is " + str(mtd['fft3'].signalAt(1866))
    print "The value at [0,1,0] is " + str(mtd['fft3'].signalAt(2226))
 
@@ -143,12 +147,38 @@ The IntermediateWorkspace shows the changes to the input workspace.
 .. |int3| image:: /images/DeltaPDF3D_int3.png
    :width: 100%
 
+**Removing Reflections and crop to sphere with fill value**
+
+The fill value should be set to approximately the background level.
+
+.. testcode:: fft3_2
+
+   DeltaPDF3D(InputWorkspace='DeltaPDF3D_MDH',OutputWorkspace='fft3',IntermediateWorkspace='int3',
+              RemoveReflections=True,Size=0.3,CropSphere=True,SphereMax=3,Convolution=False)
+   print "The value at [1,0,0] is " + str(mtd['fft3'].signalAt(1866))
+   print "The value at [0,1,0] is " + str(mtd['fft3'].signalAt(2226))
+
+.. testoutput:: fft3_2
+
+   The value at [1,0,0] is -477.173658361
+   The value at [0,1,0] is 501.081754175
+
++---------------------------------------------------------------------+---------------------------------------------------------------------+
+| Intermediate workspace after reflections removed and crop to sphere | Resulting 3D-ΔPDF                                                   |
++---------------------------------------------------------------------+---------------------------------------------------------------------+
+| |int3_2|                                                            | |fft3_2|                                                            |
++---------------------------------------------------------------------+---------------------------------------------------------------------+
+
+.. |fft3_2| image:: /images/DeltaPDF3D_fft3_2.png
+   :width: 100%
+.. |int3_2| image:: /images/DeltaPDF3D_int3_2.png
+   :width: 100%
+
 **Applying convolution**
 
 .. code-block:: python
 
    DeltaPDF3D(InputWorkspace='DeltaPDF3D_MDH',OutputWorkspace='fft4',IntermediateWorkspace='int4'
-              RemoveReflections=True,Size=0.4,CropSphere=True,SphereMax=3,Convolution=True)
+              RemoveReflections=True,Size=0.3,CropSphere=True,SphereMax=3,Convolution=True)
    print "The value at [1,0,0] is " + str(mtd['fft4'].signalAt(1866))
    print "The value at [0,1,0] is " + str(mtd['fft4'].signalAt(2226))
 
@@ -168,6 +198,31 @@ The IntermediateWorkspace shows the changes to the input workspace.
 .. |int4| image:: /images/DeltaPDF3D_int4.png
    :width: 100%
 
+**Applying convolution and deconvolution**
+
+.. code-block:: python
+
+   DeltaPDF3D(InputWorkspace='DeltaPDF3D_MDH',OutputWorkspace='fft5',IntermediateWorkspace='int5',
+              RemoveReflections=True,Size=0.3,CropSphere=True,SphereMax=3,Convolution=True,Deconvolution=True)
+   print "The value at [1,0,0] is " + str(mtd['fft5'].signalAt(1866))
+   print "The value at [0,1,0] is " + str(mtd['fft5'].signalAt(2226))
+
+.. code-block:: none
+
+   The value at [1,0,0] is -95.0767841089
+   The value at [0,1,0] is 99.3534883663
+
++--------------------------------------------------------------+--------------------------------------------------------------+
+| The deconvolution array, workspace signal is divided by this | Resulting 3D-ΔPDF                                            |
++--------------------------------------------------------------+--------------------------------------------------------------+
+| |deconv|                                                     | |fft5|                                                       |
++--------------------------------------------------------------+--------------------------------------------------------------+
+
+.. |fft5| image:: /images/DeltaPDF3D_fft5.png
+   :width: 100%
+.. |deconv| image:: /images/DeltaPDF3D_deconv.png
+   :width: 100%
+
 .. categories::
 
 .. sourcelink::
diff --git a/docs/source/algorithms/FindEPP-v1.rst b/docs/source/algorithms/FindEPP-v1.rst
index d8cb9345e334ac1164dbf210709472463c7eef0e..ffa00dab2f4d08f7e3b338a51b5c1626edb44c3c 100644
--- a/docs/source/algorithms/FindEPP-v1.rst
+++ b/docs/source/algorithms/FindEPP-v1.rst
@@ -28,7 +28,7 @@ Usage
                 XMin=4005.75, XMax=7995.75, BinWidth=10.5, BankDistanceFromSample=4.0)
 
     # search for elastic peak positions
-    table = FindEPP(ws)
+    table = FindEPP(ws, Version=1)
 
     # print some results
     print "The fit status is", table.row(0)['FitStatus']
@@ -47,3 +47,6 @@ Output:
 .. categories::
 
 .. sourcelink::
+	:filename: FindEPP
+	:cpp: None
+	:h: None
diff --git a/docs/source/algorithms/FindEPP-v2.rst b/docs/source/algorithms/FindEPP-v2.rst
new file mode 100644
index 0000000000000000000000000000000000000000..398731f00d3bee715e46adfbc2fcd68c93b4f5bd
--- /dev/null
+++ b/docs/source/algorithms/FindEPP-v2.rst
@@ -0,0 +1,60 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This is a rewrite of the :ref:`algm-FindEPP-v1` Python algorithm in C++, which offers a significant performance gain when running over large workspaces.
+
+This utility algorithm attempts to search for the elastic peak position (EPP) in each spectrum of the given workspace. The algorithm estimates the starting parameters and performs Gaussian fit using the :ref:`algm-Fit` algorithm.
+
+.. note::
+    This algorithm uses a very simple approach to search for an elastic peak: it assumes that the elastic peak has the maximal intensity. This approach may fail if the dataset contains Bragg peaks with higher intensities.
+
+As a result, `TableWorkspace <http://www.mantidproject.org/TableWorkspace>`_ with the following columns is produced: *WorkspaceIndex*, *PeakCentre*, *PeakCentreError*, *Sigma*, *SigmaError*, *Height*, *HeightError*, *chiSq* and *FitStatus*. Table rows correspond to the workspace indices.
+
+The last column contains the status of the peak finding as follows:
+
+* **success** : If the fit succeeded, the row is populated with the corresponding values obtained by the fit.
+* **fitFailed** : If the fit failed (for whatever reason). A debug message will be logged with a detailed failure message from the fit algorithm. *PeakCentre* is filled with the maximum.
+* **narrowPeak** : If there are fewer than 3 bins around the maximum that are above `0.5*MAX`. An informational message is logged and the fit is not attempted. *PeakCentre* is filled with the maximum.
+* **negativeMaximum** : If the maximum of the spectrum is not positive. A message is logged in the notice channel and the fit is not attempted.
+
+Usage
+-----
+**Example: Find EPP in the given workspace.**
+
+.. testcode:: ExFindEPP
+
+    # create sample workspace
+    ws = CreateSampleWorkspace(Function="User Defined", UserDefinedFunction="name=LinearBackground, \
+                A0=0.3;name=Gaussian, PeakCentre=6000, Height=5, Sigma=75", NumBanks=2, BankPixelWidth=1,
+                XMin=4005.75, XMax=7995.75, BinWidth=10.5, BankDistanceFromSample=4.0)
+
+    # search for elastic peak positions
+    table = FindEPP(ws)
+
+    # print some results
+    print "The fit status is", table.row(0)['FitStatus']
+    print "The peak centre is at", round(table.row(0)['PeakCentre'], 2), "microseconds"
+    print "The peak height is", round(table.row(0)['Height'],2)
+
+Output:
+
+.. testoutput:: ExFindEPP
+
+    The fit status is success
+    The peak centre is at 6005.25 microseconds
+    The peak height is 4.84
+
+
+.. categories::
+
+.. sourcelink::
+   :filename: FindEPP 
+   :py: None
diff --git a/docs/source/algorithms/IndirectILLEnergyTransfer-v1.rst b/docs/source/algorithms/IndirectILLEnergyTransfer-v1.rst
index f50cd680b8aa6d6b5fb3b329c52789574b1bcec3..0b66e985b252fd3cc46560576262357f9e2e9101 100644
--- a/docs/source/algorithms/IndirectILLEnergyTransfer-v1.rst
+++ b/docs/source/algorithms/IndirectILLEnergyTransfer-v1.rst
@@ -11,7 +11,7 @@ Description
 
 This is a part of multi-algorithm reduction workflow for **IN16B** indirect geometry instrument at **ILL**.
 It handles the first steps of the reduction chain, such as grouping of the detectors, normalizing to monitor dependent on the reduction type.
-It performs transformation of the axes; x-axis from channel number to energy transfer, y-axis to scattering angle.
+It performs transformation of the axes: the x-axis from channel number to energy transfer and, optionally, the y-axis to scattering angle or elastic momentum transfer.
 It handles **automatically** all three types of data (QENS, EFWS, IFWS) recorded with or without mirror sense.
 Note, that following the standard, the ``Unit`` for energy transfer (``DeltaE``) will be mili-elevtron-volts (``mev``).
 This algorithm is intended to handle only single file at a time, although if multiple files are given, they will be automatically summed at raw level, i.e. while loading.
diff --git a/docs/source/algorithms/LoadDNSLegacy-v1.rst b/docs/source/algorithms/LoadDNSLegacy-v1.rst
index 96b17ef1347e24281e2c9e7153be137f458cdab7..ebe85d2b41d2bab38e634cc1d4694c7fd83347c1 100644
--- a/docs/source/algorithms/LoadDNSLegacy-v1.rst
+++ b/docs/source/algorithms/LoadDNSLegacy-v1.rst
@@ -14,7 +14,13 @@ Description
    This algorithm is being developed for a specific instrument. It might get changed or even 
    removed without a notification, should instrument scientists decide to do so.
 
-This algorithm loads a DNS legacy data file into a :ref:`Workspace2D <Workspace2D>`. The loader rotates the detector bank in the position given in the data file. 
+This algorithm loads a DNS legacy data file into a :ref:`Workspace2D <Workspace2D>`. The loader rotates the detector bank in the position given in the data file.
+
+**Output**
+
+- For diffraction mode data (only one time channel) the output is a :ref:`Workspace2D <Workspace2D>` with the X-axis in wavelength units.
+
+- For TOF data (more than one time channel) the output is a :ref:`Workspace2D <Workspace2D>` with the X-axis in TOF units. The lower bin boundary for channel :math:`i`, :math:`t_i`, is calculated as :math:`t_i = t_1 + t_{delay} + i \cdot \Delta t`, where :math:`\Delta t` is the channel width and :math:`t_1` is the time-of-flight from the source (chopper) to the sample (see the sketch below). The channel width given in the data file is scaled by the *channel_width_factor*, which can be set in the :ref:`parameter file <InstrumentParameterFile>`.
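+
+The numbers in the following sketch are illustrative only; the real values of :math:`t_1`,
+:math:`t_{delay}` and :math:`\Delta t` come from the data file and the
+:ref:`parameter file <InstrumentParameterFile>`:
+
+.. code-block:: python
+
+   t1 = 450.0      # time-of-flight from the chopper to the sample, microseconds
+   t_delay = 10.0  # delay time, microseconds
+   dt = 40.0       # channel width, already scaled by channel_width_factor
+
+   # lower bin boundaries of the first five time channels
+   bin_edges = [t1 + t_delay + i * dt for i in range(5)]
+   print(bin_edges)  # [460.0, 500.0, 540.0, 580.0, 620.0]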
 
 **Normalization**
 
@@ -28,7 +34,9 @@ The **Normalization** option offers the following choices:
 
 **Polarisation**
 
-Since polarisation is not specified in the DNS legacy files, coil currents table is required to lookup for the polarisation and set the *polarisation* sample log. The coil currents table is a text file containing the following table.
+Since the polarisation is not specified in the DNS legacy files, a coil currents table is required to look up the polarisation and set the *polarisation* sample log. The default coil currents are given by the *x_currents*, *y_currents* and *z_currents* parameters in the :ref:`parameter file <InstrumentParameterFile>` for the x, y and z polarisations, respectively.
+
+Alternatively, a coil currents table may be provided as a text file containing the following table.
 
 +--------------+----------+-------+-------+-------+-------+
 | polarisation | comment  |  C_a  |  C_b  |  C_c  |  C_z  |
@@ -42,9 +50,9 @@ Since polarisation is not specified in the DNS legacy files, coil currents table
 |      x       |    7     |   0   | -2.1  | -0.97 |  2.21 |          
 +--------------+----------+-------+-------+-------+-------+
 
-First row must contain the listed column headers, other rows contain coil currents for each polarisation. Rows with different currents for one polarisation are alowed. Columns are separated by tab symbols. This table must be provided to the user by instrument scientist.  
+The first row must contain the listed column headers; the other rows contain the coil currents for each polarisation. Rows with different currents for one polarisation are allowed. Columns are separated by tab symbols.
 
-This algorithm only supports DNS instrument in its configuration before major upgrade. 
+This algorithm only supports the DNS instrument in its configuration with one detector bank (polarisation analysis).
 
 Usage
 -----
@@ -55,10 +63,9 @@ Usage
 
    # data file.
    datafile = 'dn134011vana.d_dat'
-   coilcurrents = 'currents.txt'
 
    # Load dataset
-   ws = LoadDNSLegacy(datafile, Normalization='monitor', CoilCurrentsTable=coilcurrents)
+   ws = LoadDNSLegacy(datafile, Normalization='monitor')
 
    print "This workspace has", ws.getNumDims(), "dimensions and has", ws.getNumberHistograms(), "histograms."
 
diff --git a/docs/source/algorithms/PredictPeaks-v1.rst b/docs/source/algorithms/PredictPeaks-v1.rst
index 8c49868b57daf4e45481511c30e0dfc4147adf34..03f12014258a3b441590388efa789facdaf834df 100644
--- a/docs/source/algorithms/PredictPeaks-v1.rst
+++ b/docs/source/algorithms/PredictPeaks-v1.rst
@@ -75,7 +75,7 @@ with predicted structure factor very close to 0, which are absent:
 
 .. testoutput:: ExPredictPeaksCrystalStructure
 
-    There are 294 detectable peaks.
+    There are 295 detectable peaks.
     Maximum intensity: 6101.93
     Peaks with relative intensity < 1%: 94
     Number of absences: 16
diff --git a/docs/source/algorithms/ReflectometryReductionOne-v2.rst b/docs/source/algorithms/ReflectometryReductionOne-v2.rst
index 071cf6434a864b19e69dfa3ac3efc8eb0b4d7fd7..6cc19f9820acc6effa711e0687e808217bb2738a 100644
--- a/docs/source/algorithms/ReflectometryReductionOne-v2.rst
+++ b/docs/source/algorithms/ReflectometryReductionOne-v2.rst
@@ -183,10 +183,10 @@ Output:
 
 .. testoutput:: ExReflRedOneTrans
 
-   0.4588
-   0.4655
-   0.7336
-   1.0156
+   0.4592
+   0.4654
+   0.7278
+   1.0305
 
 .. categories::
 
diff --git a/docs/source/algorithms/SANSCalculateTransmission-v1.rst b/docs/source/algorithms/SANSCalculateTransmission-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..680e22aa1a6282936bfa9854bee92e304836846a
--- /dev/null
+++ b/docs/source/algorithms/SANSCalculateTransmission-v1.rst
@@ -0,0 +1,97 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm provides a transmission workspace for subsequent wavelength correction in :ref:`algm-Q1D` or :ref:`algm-Qxy`.
+The settings of the algorithm are provided by the state object. The user provides a *TransmissionWorkspace*,
+*DirectWorkspace* and the data type to be used, i.e. *Sample* or *Can*. The *OutputWorkspace* is a fitted
+workspace, but the unfitted data set is available via *UnfittedData*.
+
+Currently this algorithm supports **SANS2D**, **LOQ** and **LARMOR**.
+
+
+Relevant SANSState entries for SANSCalculateTransmission
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The required information for the transmission calculation is retrieved from a state object.
+
+The elements are:
+
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| Entry                           | Type           | Description                         | Mandatory          | Default|
++=================================+================+=====================================+====================+========+
+| transmission_radius_on_detector | Float          | A radius on the detector            | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| transmission_roi_files          | List of String | A list of ROI file names            | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| transmission_mask_files         | List of String | A list of mask file names           | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| default_transmission_monitor    | Integer        | The default transmission monitor    | auto setup         | auto   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| transmission_monitor            | Integer        | The transmission monitor            | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| default_incident_monitor        | Integer        | The default incident monitor        | auto setup         | auto   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| incident_monitor                | Integer        | The incident monitor                | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| prompt_peak_correction_min      | Float          | Min time of the prompt peak         | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| prompt_peak_correction_max      | Float          | Max time of the prompt peak         | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| prompt_peak_correction_enabled  | Bool           | If using prompt peak correction     | No                 | False  |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| use_full_wavelength_range       | Bool           | If using full wavelength range      | No                 | False  |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_full_range_low       | Float          | Min of instrument's full wav. range | auto setup         | auto   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_full_range_high      | Float          | Max of instrument's full wav. range | auto setup         | auto   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| background_TOF_general_start    | Float          | General background corr. start time | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| background_TOF_general_stop     | Float          | General background corr. stop time  | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| background_TOF_monitor_start    | Dict           | Monitor num vs background corr.     | No                 | None   |
+|                                 |                | start time                          |                    |        |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| background_TOF_monitor_stop     | Dict           | Monitor num vs background corr.     | No                 | None   |
+|                                 |                | stop time                           |                    |        |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| background_TOF_roi_start        | Float          | ROI background corr. start time     | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| background_TOF_roi_stop         | Float          | ROI background corr. stop time      | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| fit                             | Dict           | DataType enum (Sample and Can) vs   | No                 | None   |
+|                                 |                | fit state                           |                    |        |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+
+There is a fit state for the sample and the can which contains the following settings:
+
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| Entry                           | Type           | Description                         | Mandatory          | Default|
++=================================+================+=====================================+====================+========+
+| fit_type                        | FitType enum   | The type of fit, ie log, lin, poly. | No                 | log    |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| polynomial_order                | Integer        | The polynomial order when using     | No                 | 0      |
+|                                 |                | polynomial fitting                  |                    |        |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_low                  | Float          | Lower wavelength bound              | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_high                 | Float          | Upper wavelength bound              | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+
+
+Note that the prompt peak settings are automatically set up for **LOQ**.
+
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
+
+
+.. categories::
+
+.. sourcelink::
diff --git a/docs/source/algorithms/SANSConvertToQ-v1.rst b/docs/source/algorithms/SANSConvertToQ-v1.rst
index e876312fcb767d12db1f8477a2854203191b37df..ae5eaf71e1d9a01b663ecd1191b7ebf0530937c6 100644
--- a/docs/source/algorithms/SANSConvertToQ-v1.rst
+++ b/docs/source/algorithms/SANSConvertToQ-v1.rst
@@ -107,6 +107,9 @@ The elements are:
 
 Note that the momentum transfer resolution calculation is only applicable for 1D reductions.
 
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
+
+
 .. categories::
 
 .. sourcelink::
diff --git a/docs/source/algorithms/SANSConvertToWavelength-v1.rst b/docs/source/algorithms/SANSConvertToWavelength-v1.rst
index 4baa1d4addb2b69e90787f7dfc4cfde5b12d5384..e1809d3c9ead04bfe71e1976d43dd34a1b821849 100644
--- a/docs/source/algorithms/SANSConvertToWavelength-v1.rst
+++ b/docs/source/algorithms/SANSConvertToWavelength-v1.rst
@@ -36,6 +36,7 @@ The elements of the wavelength conversion state are:
 | wavelength_step_type | RangeStepType enum | Wavelength step type                         | No         | None          |
 +----------------------+--------------------+----------------------------------------------+------------+---------------+
 
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
 
 
 .. categories::
diff --git a/docs/source/algorithms/SANSCreateAdjustmentWorkspaces-v1.rst b/docs/source/algorithms/SANSCreateAdjustmentWorkspaces-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d002ddc8f2b1cb6a7fc42930c25e309733ac52e4
--- /dev/null
+++ b/docs/source/algorithms/SANSCreateAdjustmentWorkspaces-v1.rst
@@ -0,0 +1,50 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm provides a wavelength-adjustment workspace, a pixel-adjustment workspace and a wavelength-and-pixel-adjustment
+workspace which are used in :ref:`algm-Q1D` or  :ref:`algm-Qxy`. The wavelength-adjustment and the pixel-adjustment workspaces
+are obtained from :ref:`algm-SANSCreateWavelengthAndPixelAdjustment` and the wavelength-and-pixel-adjustment workspace is
+obtained from :ref:`algm-SANSWideAngleCorrection`. The relevant settings are provided via the state object. Note
+that none of these workspaces is required for a minimal reduction.
+
+Currently this algorithm supports **SANS2D**, **LOQ** and **LARMOR**.
+
+
+Relevant SANSState entries for SANSCreateAdjustmentWorkspaces
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The required information for the creation of the wavelength-adjustment workspace, the pixel-adjustment workspace
+and the wavelength-and-pixel-adjustment workspace is retrieved from a state object. Note that this state
+is a composite of states which are consumed in the child algorithms.
+
+The elements are:
+
++---------------------------------+-----------+---------------------------------------------------+-----------+--------+
+| Entry                           | Type      | Description                                       | Mandatory | Default|
++=================================+===========+===================================================+===========+========+
+| calculate_transmission          | sub state | State used in                                     | Yes       | None   |
+|                                 |           | :ref:`algm-SANSCalculateTransmission`             |           |        |
++---------------------------------+-----------+---------------------------------------------------+-----------+--------+
+| normalize_to_monitor            | sub state | State used in                                     | Yes       | None   |
+|                                 |           | :ref:`algm-SANSNormalizeToMonitor`                |           |        |
++---------------------------------+-----------+---------------------------------------------------+-----------+--------+
+| wavelength_and_pixel_adjustment | sub state | State used in                                     | Yes       | None   |
+|                                 |           | :ref:`algm-SANSCreateWavelengthAndPixelAdjustment`|           |        |
++---------------------------------+-----------+---------------------------------------------------+-----------+--------+
+| wide_angle_correction           | Bool      | Whether to apply the wide angle correction        | No        | False  |
++---------------------------------+-----------+---------------------------------------------------+-----------+--------+
+
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
+
+
+.. categories::
+
+.. sourcelink::
diff --git a/docs/source/algorithms/SANSCreateWavelengthAndPixelAdjustment-v1.rst b/docs/source/algorithms/SANSCreateWavelengthAndPixelAdjustment-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8e7a78335a976f6b86b820708e10ce8766577597
--- /dev/null
+++ b/docs/source/algorithms/SANSCreateWavelengthAndPixelAdjustment-v1.rst
@@ -0,0 +1,53 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm provides a wavelength-adjustment workspace and a pixel-adjustment workspace which are used in :ref:`algm-Q1D` or  :ref:`algm-Qxy`.
+The wavelength-adjustment workspace is created by combining the transmission workspace obtained
+from  :ref:`algm-SANSCalculateTransmission`, the monitor normalization workspace obtained from :ref:`algm-SANSNormalizeToMonitor` and
+efficiency correction files which are provided by the state object. The pixel-adjustment settings are also obtained
+from the state object.
+
+
+Currently this algorithm supports **SANS2D**, **LOQ** and **LARMOR**.
+
+
+Relevant SANSState entries for SANSCreateWavelengthAndPixelAdjustment
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The required information for the creation of the wavelength-adjustment workspace and the pixel-adjustment workspace is retrieved from a state object.
+
+The elements are:
+
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| Entry                           | Type           | Description                         | Mandatory          | Default|
++=================================+================+=====================================+====================+========+
+| wavelength_low                  | Float          | Lower wavelength bound              | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_high                 | Float          | Upper wavelength bound              | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_step                 | Float          | Wavelength step                     | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_step_type            | RangeStepType  | The wavelength step type            | No                 | None   |
+|                                 | enum           |                                     |                    |        |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| adjustment_files                | Dict           | Detector vs StateAdjustmentFiles    | No                 | None   |
+|                                 |                | object                              |                    |        |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| idf_path                        | String         | The path to the IDF                 | auto setup         | auto   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+
+
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
+
+
+.. categories::
+
+.. sourcelink::
diff --git a/docs/source/algorithms/SANSCrop-v1.rst b/docs/source/algorithms/SANSCrop-v1.rst
index 9e5aa7fb7db119c0d8cd4bb321a7e56c4b23a4f5..340047d864844ae6308053849a92d1c5169c9de2 100644
--- a/docs/source/algorithms/SANSCrop-v1.rst
+++ b/docs/source/algorithms/SANSCrop-v1.rst
@@ -12,7 +12,7 @@ Description
 This algorithm allows to crop a particular detector bank from a workspace. The supported configurations are *LAB* and *HAB*. Currently this crop mechanism is implemented for **SANS2D**, **LOQ** and **LARMOR**.
 
 Component setting: *LAB* and *HAB*
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 The mapping of this setting is:
 
diff --git a/docs/source/algorithms/SANSLoad-v1.rst b/docs/source/algorithms/SANSLoad-v1.rst
index fdd8605077d152b807a9cfb6da82d25779bf343d..1ce6490e87c835ff8159f36d00d5bc778efba538 100644
--- a/docs/source/algorithms/SANSLoad-v1.rst
+++ b/docs/source/algorithms/SANSLoad-v1.rst
@@ -71,7 +71,7 @@ The elements of the SANSState are:
 +--------------------------------+---------------------+------------------------------------------+----------------------------------------------+
 
 
-Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
 
 Optimization Setting: *PublishToCache* and *UseCached*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/source/algorithms/SANSMaskWorkspace-v1.rst b/docs/source/algorithms/SANSMaskWorkspace-v1.rst
index 61d5e81358fb1566c702b47e9f062787dd4655fa..8ab90994988c0d48995a5022478c53d38399ad79 100644
--- a/docs/source/algorithms/SANSMaskWorkspace-v1.rst
+++ b/docs/source/algorithms/SANSMaskWorkspace-v1.rst
@@ -117,7 +117,7 @@ The detectors dictionary above maps to a mask state object for the individual de
 +-----------------------------+-----------------+--------------------------------------+------------+---------------+
 
 
-Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
 
 
 Mask options for the detector: *LAB*, *HAB*
diff --git a/docs/source/algorithms/SANSMove-v1.rst b/docs/source/algorithms/SANSMove-v1.rst
index 2dab9bba62766581fec3b7ae760f1995d5d64eb8..f9961d61393e7c0e11e16eb4fa67420f8a14fa2d 100644
--- a/docs/source/algorithms/SANSMove-v1.rst
+++ b/docs/source/algorithms/SANSMove-v1.rst
@@ -120,7 +120,7 @@ For LARMOR
 +----------------+-------+-------------------------------------------------+------------+---------------+
 
 
-Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
 
 
 Move options: *InitialMove*, *ElementaryDisplacement*, *SetToZero*
diff --git a/docs/source/algorithms/SANSNormalizeToMonitor-v1.rst b/docs/source/algorithms/SANSNormalizeToMonitor-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1ae60263d7abb455e1c319b2152f72e618f4678b
--- /dev/null
+++ b/docs/source/algorithms/SANSNormalizeToMonitor-v1.rst
@@ -0,0 +1,69 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm provides a monitor normalization workspace for subsequent wavelength correction in :ref:`algm-Q1D` or  :ref:`algm-Qxy`.
+The settings of the algorithm are provided by the state object. The user can provide a *ScaleFactor* which is normally
+obtained during event slicing.
+
+Currently this algorithm supports **SANS2D**, **LOQ** and **LARMOR**.
+
+
+Relevant SANSState entries for SANSNormalizeToMonitor
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The required information for the monitor normalization is retrieved from a state object.
+
+The elements are:
+
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| Entry                           | Type           | Description                         | Mandatory          | Default|
++=================================+================+=====================================+====================+========+
+| prompt_peak_correction_min      | Float          | Min time of the prompt peak         | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| prompt_peak_correction_max      | Float          | Max time of the prompt peak         | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| prompt_peak_correction_enabled  | Bool           | If using prompt peak correction     | No                 | False  |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| rebin_type                      | RebinType enum | The type of rebinning to be used    | No                 | Rebin  |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_low                  | Float          | Lower wavelength bound              | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_high                 | Float          | Upper wavelength bound              | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_step                 | Float          | Wavelength step                     | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| wavelength_step_type            | RangeStepType  | Wavelength step type                | No                 | None   |
+|                                 | enum           |                                     |                    |        |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| incident_monitor                | Integer        | The incident monitor                | Yes                | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| background_TOF_general_start    | Float          | General background corr. start time | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| background_TOF_general_stop     | Float          | General background corr. stop time  | No                 | None   |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| background_TOF_monitor_start    | Dict           | Monitor num vs background corr.     | No                 | None   |
+|                                 |                | start time                          |                    |        |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+| background_TOF_monitor_stop     | Dict           | Monitor num vs background corr.     | No                 | None   |
+|                                 |                | stop time                           |                    |        |
++---------------------------------+----------------+-------------------------------------+--------------------+--------+
+
+
+Note that the prompt peak settings are automatically set up for **LOQ**.
+
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
+
+
+
+.. categories::
+
+.. sourcelink::
diff --git a/docs/source/algorithms/SANSScale-v1.rst b/docs/source/algorithms/SANSScale-v1.rst
index a51495a1bd224e8398161a2ec13ba185fe179ce6..d02b6ea156e3a4226c96e3a82cee4e0e9126a5bf 100644
--- a/docs/source/algorithms/SANSScale-v1.rst
+++ b/docs/source/algorithms/SANSScale-v1.rst
@@ -44,7 +44,7 @@ The elements of the scale state are:
 +---------------------+------------------+--------------------------------------------------------+------------+---------------+
 
 
-Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
 
 
 
diff --git a/docs/source/algorithms/SANSSliceEvent-v1.rst b/docs/source/algorithms/SANSSliceEvent-v1.rst
index 03edb0f654715a63af58f67e43ad4da6a620b22c..3016309f4a6cbe39ab0c7be9738f0c6d980528df 100644
--- a/docs/source/algorithms/SANSSliceEvent-v1.rst
+++ b/docs/source/algorithms/SANSSliceEvent-v1.rst
@@ -15,7 +15,7 @@ if the slice is to be taken from a sample or a can workspace can be specified. N
 
 
 Relevant SANSState entries for SANSSlice
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 The required information for the slice operation is retrieved from a state object.
 
@@ -31,10 +31,10 @@ The elements of the slice state are:
 +-------------+---------------+-------------------------------------------------------+------------+---------------+
 
 
-Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.
+**Note that these settings should be only populated via the GUI or the Python Interface of ISIS SANS.**
 
 Slice options for the data type: *Sample*, *Can*
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 The *Sample* setting performs regular slicing
 
diff --git a/docs/source/concepts/UnitFactory.rst b/docs/source/concepts/UnitFactory.rst
index 300333779b0c543e37bd86b1390055db8ee805e3..eadc3aa5b2c75272fd98ec6cf73a77f88566ffd2 100644
--- a/docs/source/concepts/UnitFactory.rst
+++ b/docs/source/concepts/UnitFactory.rst
@@ -49,6 +49,9 @@ The following units are available in the default Mantid distribution.
 | Spin Echo Time                            | SpinEchoTime                    | :math:`ns`                  | | :math:`constant \times \lambda^3`                                                                              |
 |                                           |                                 |                             | |  The constant is supplied in eFixed                                                                            |
 +-------------------------------------------+---------------------------------+-----------------------------+------------------------------------------------------------------------------------------------------------------+
+| d-spacingPerpendicular                    | dSpacingPerpendicular           | :math:`\mathrm{\AA}`        | :math:`d_{\perp} = \sqrt{\lambda^2 - 2\log\cos\theta}`                                                           |
++-------------------------------------------+---------------------------------+-----------------------------+------------------------------------------------------------------------------------------------------------------+
+
 
 Where :math:`L_1` and :math:`L_2` are sample to the source and sample to
 detector distances respectively, :math:`L_{tot} = L_1+L_2` and
@@ -65,6 +68,10 @@ energy respectively. Units conversion into elastic momentum transfer
 (MomentumTransfer) will throw in elastic mode (emode=0) on inelastic
 workspace (when energy transfer is specified along x-axis)
 
+**d-spacingPerpendicular** is a unit introduced in `J. Appl. Cryst. (2015) 48, pp. 1627--1636 <https://doi.org/10.1107/S1600576715016520>`_ for 2D Rietveld refinement
+of angular- and wavelength-dispersive neutron time-of-flight powder diffraction data. Together with the d-spacing :math:`d`,
+d-spacingPerpendicular :math:`d_{\perp}` forms a new orthogonal coordinate system.
+
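+A small sketch evaluating the table entry above at face value. It assumes the logarithm is the
+natural logarithm and that :math:`\theta` is the Bragg angle in radians; both are assumptions about
+the notation, not statements from the reference.
+
+.. code-block:: python
+
+    import math
+
+    def d_perpendicular(wavelength, theta):
+        # d_perp = sqrt(lambda^2 - 2*ln(cos(theta))), taken verbatim from the table above
+        return math.sqrt(wavelength**2 - 2.0 * math.log(math.cos(theta)))
+
+    # Example values: 1.5 Angstrom wavelength, theta = 20 degrees
+    print(d_perpendicular(1.5, math.radians(20.0)))
+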
 
 Working with Units in Python
 ----------------------------
@@ -136,4 +143,4 @@ and we will add it to the default Mantid library.
 
 
 
-.. categories:: Concepts
\ No newline at end of file
+.. categories:: Concepts
diff --git a/docs/source/fitfunctions/DynamicKuboToyabe.rst b/docs/source/fitfunctions/DynamicKuboToyabe.rst
index d23576ee075e45dfe163911fcf67baafbc1a32d9..99181d2261bbacb29b6688e5e525f307c473d1a5 100644
--- a/docs/source/fitfunctions/DynamicKuboToyabe.rst
+++ b/docs/source/fitfunctions/DynamicKuboToyabe.rst
@@ -22,7 +22,7 @@ where :math:`g_z\left(t\right)` is the static KT function, and :math:`\nu` the m
 
 | In the presence of a longitudinal field, :math:`B_0=\omega_0 /\left(2\pi \gamma_{\mu}\right)>0`: 
 
-.. math:: g_z\left(t\right) = \mbox{A} \Bigg[ 1 - 2\frac{\Delta^2}{\omega_0^2}\Big(1-cos(\omega_0 t)e^{-\frac{1}{2}\Delta^2 t^2}\Big) + 2\frac{\Delta^4}{\omega_0^4}\omega_0\int_0^\tau \sin(\omega_0\tau)e^{-\frac{1}{2}\Delta^2\tau^2}d\tau \Bigg]
+.. math:: g_z\left(t\right) = \mbox{A} \Bigg[ 1 - 2\frac{\Delta^2}{\omega_0^2}\Big(1-\cos(\omega_0 t)e^{-\frac{1}{2}\Delta^2 t^2}\Big) + 2\frac{\Delta^4}{\omega_0^4}\omega_0\int_0^t \sin(\omega_0\tau)e^{-\frac{1}{2}\Delta^2\tau^2}d\tau \Bigg]
 
 DynamicKuboToyabe function has one attribute (non-fitting parameter), 'BinWidth', that sets the width of the step size between points for numerical integration. Note that 
 small values will lead to long calculation times, while large values will produce less accurate results. The default value is set to 0.05, and it is allowed to vary in the range [0.001,0.1].
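+
+A minimal numerical sketch (not the Mantid implementation) of the longitudinal-field expression above,
+using a simple trapezoidal rule for the integral. The integration step plays a role analogous to the
+'BinWidth' attribute, and the parameter values are made up for illustration.
+
+.. code-block:: python
+
+    import numpy as np
+
+    def gz_longitudinal(t, A, Delta, omega0, dtau=0.05):
+        # g_z(t) = A * [1 - 2*Delta^2/omega0^2 * (1 - cos(omega0*t)*exp(-Delta^2*t^2/2))
+        #               + 2*Delta^4/omega0^4 * omega0 * integral_0^t sin(omega0*tau)*exp(-Delta^2*tau^2/2) dtau]
+        tau = np.arange(0.0, t + dtau, dtau)
+        integral = np.trapz(np.sin(omega0 * tau) * np.exp(-0.5 * Delta**2 * tau**2), tau)
+        return A * (1.0
+                    - 2.0 * Delta**2 / omega0**2 * (1.0 - np.cos(omega0 * t) * np.exp(-0.5 * Delta**2 * t**2))
+                    + 2.0 * Delta**4 / omega0**4 * omega0 * integral)
+
+    print(gz_longitudinal(t=1.0, A=1.0, Delta=0.5, omega0=2.0))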
diff --git a/docs/source/images/ArtRightGUIWaterfallCustom2sp1.PNG b/docs/source/images/ArtRightGUIWaterfallCustom2sp1.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..499eacb0029313d869876347795ecd7d1fc3cdb7
Binary files /dev/null and b/docs/source/images/ArtRightGUIWaterfallCustom2sp1.PNG differ
diff --git a/docs/source/images/ArtSurfacePlotT1.PNG b/docs/source/images/ArtSurfacePlotT1.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..bade121e5d5e7df5797f07f6720b6a0e423bb2f2
Binary files /dev/null and b/docs/source/images/ArtSurfacePlotT1.PNG differ
diff --git a/docs/source/images/ArtWaterfallT1.PNG b/docs/source/images/ArtWaterfallT1.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..4ffcb85671223500f41ad4c4aebb0d2834994190
Binary files /dev/null and b/docs/source/images/ArtWaterfallT1.PNG differ
diff --git a/docs/source/images/DeltaPDF3D_deconv.png b/docs/source/images/DeltaPDF3D_deconv.png
new file mode 100644
index 0000000000000000000000000000000000000000..d1380625134079ee9d212571435393f6895043f2
Binary files /dev/null and b/docs/source/images/DeltaPDF3D_deconv.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft1.png b/docs/source/images/DeltaPDF3D_fft1.png
index 6b11a18065a1d69758bcc2288aa42d9ebe144a43..83161f640717bc52ffe009960345e3d74fa3daeb 100644
Binary files a/docs/source/images/DeltaPDF3D_fft1.png and b/docs/source/images/DeltaPDF3D_fft1.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft2.png b/docs/source/images/DeltaPDF3D_fft2.png
index 9fb8c75f45c63b084c0212106c46df4d70c7ee2e..7c9a6f95b3b1e833682eaf3e0b11517383dbb296 100644
Binary files a/docs/source/images/DeltaPDF3D_fft2.png and b/docs/source/images/DeltaPDF3D_fft2.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft3.png b/docs/source/images/DeltaPDF3D_fft3.png
index 0e2b058b780c8fefcd1521f7d4ce7c665e2305d6..d839a4b6343e6e71248b3e970f347280d999b31c 100644
Binary files a/docs/source/images/DeltaPDF3D_fft3.png and b/docs/source/images/DeltaPDF3D_fft3.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft3_2.png b/docs/source/images/DeltaPDF3D_fft3_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..c162288bd9246f66591c23cb8bae6c92da636a97
Binary files /dev/null and b/docs/source/images/DeltaPDF3D_fft3_2.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft4.png b/docs/source/images/DeltaPDF3D_fft4.png
index 1e6b5ea71fb5b294b36069fa592c0a938cc8cb9b..ab0d6e1e7e9ade711993f22f48775f3d1d3d8d38 100644
Binary files a/docs/source/images/DeltaPDF3D_fft4.png and b/docs/source/images/DeltaPDF3D_fft4.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft5.png b/docs/source/images/DeltaPDF3D_fft5.png
new file mode 100644
index 0000000000000000000000000000000000000000..c5c6ccd59f09cf1a778d4a057945d16dd5f8e4f1
Binary files /dev/null and b/docs/source/images/DeltaPDF3D_fft5.png differ
diff --git a/docs/source/images/DeltaPDF3D_int2.png b/docs/source/images/DeltaPDF3D_int2.png
index 3cd7f67c63af2219e1e728ca7bb2590104e5a305..24982081d4561802aa93b5c09f03797b3ba36ca7 100644
Binary files a/docs/source/images/DeltaPDF3D_int2.png and b/docs/source/images/DeltaPDF3D_int2.png differ
diff --git a/docs/source/images/DeltaPDF3D_int3.png b/docs/source/images/DeltaPDF3D_int3.png
index 4024a397adb90eb15216c86ea6e37f7635b8a496..a5cc9676e84ba02940fc7ea38f92b1276fc241d1 100644
Binary files a/docs/source/images/DeltaPDF3D_int3.png and b/docs/source/images/DeltaPDF3D_int3.png differ
diff --git a/docs/source/images/DeltaPDF3D_int3_2.png b/docs/source/images/DeltaPDF3D_int3_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..acdc24e817d50310d285ed830dc286404e4d5971
Binary files /dev/null and b/docs/source/images/DeltaPDF3D_int3_2.png differ
diff --git a/docs/source/images/DeltaPDF3D_int4.png b/docs/source/images/DeltaPDF3D_int4.png
index 36511b954e3aa7d3eb1fedd380e9e17beb3ed518..803b9c876613361c8a0ddbb4276f6d7e2196e575 100644
Binary files a/docs/source/images/DeltaPDF3D_int4.png and b/docs/source/images/DeltaPDF3D_int4.png differ
diff --git a/docs/source/images/DeltaPDF3D_testWS.png b/docs/source/images/DeltaPDF3D_testWS.png
index 90c910e35c904bf232378f6d5614d6d7e4337532..fd2cb7d0100448627a57a6b48ebd469a736804e3 100644
Binary files a/docs/source/images/DeltaPDF3D_testWS.png and b/docs/source/images/DeltaPDF3D_testWS.png differ
diff --git a/docs/source/images/ISISReflectometryPolref_event_handling_tab.png b/docs/source/images/ISISReflectometryPolref_event_handling_tab.png
index b50617345636919b5186bf6f008fe17423282e0a..1fcb17c8aba8a2e299d25e9f4592d53f263982a0 100644
Binary files a/docs/source/images/ISISReflectometryPolref_event_handling_tab.png and b/docs/source/images/ISISReflectometryPolref_event_handling_tab.png differ
diff --git a/docs/source/images/MuonAnalysisCombinePeriods.png b/docs/source/images/MuonAnalysisCombinePeriods.png
new file mode 100644
index 0000000000000000000000000000000000000000..74ad970345e99d7d681d994762f459be944a9da1
Binary files /dev/null and b/docs/source/images/MuonAnalysisCombinePeriods.png differ
diff --git a/docs/source/images/MuonAnalysisDataAnalysis3.10.png b/docs/source/images/MuonAnalysisDataAnalysis3.10.png
new file mode 100644
index 0000000000000000000000000000000000000000..8a2bbd2e7b69fb5b1b5862ef4238e7b98343b94e
Binary files /dev/null and b/docs/source/images/MuonAnalysisDataAnalysis3.10.png differ
diff --git a/docs/source/images/MuonAnalysisTFAsymm.png b/docs/source/images/MuonAnalysisTFAsymm.png
new file mode 100644
index 0000000000000000000000000000000000000000..a0bdf1429dd413de5cf8d0af6d7618207d7ee6d0
Binary files /dev/null and b/docs/source/images/MuonAnalysisTFAsymm.png differ
diff --git a/docs/source/interfaces/CrystalFieldPythonInterface.rst b/docs/source/interfaces/CrystalFieldPythonInterface.rst
index 1854e7482328be0298fe9e41a313c23c7b49ae24..7822d4345a4e07307b579354633cfa0eaf345db1 100644
--- a/docs/source/interfaces/CrystalFieldPythonInterface.rst
+++ b/docs/source/interfaces/CrystalFieldPythonInterface.rst
@@ -114,12 +114,12 @@ The new output::
   
 To calculate a spectrum we need to define a shape of each peak (peak profile function) and its default width (`FWHM`).
 The width can be set either via a keyword argument or a property with name `FWHM`. If the peak shape isn't set the default
-of Lorentzian is assumed. To set a different shape use the `setPeaks` method::
+of Lorentzian is assumed. To set a different shape use the `PeakShape` property::
 
-  cf.setPeaks('Gaussian')
+  cf.PeakShape = 'Gaussian'
   cf.FWHM = 0.9
   
-The arguments of `setPeaks` are expected to be names of Mantid peak fit functions. At the moment only `Lorentzian` and
+The values of `PeakShape` are expected to be names of Mantid peak fit functions. At the moment only `Lorentzian` and
 `Gaussian` can be used.
 
 After the peak shape is defined a spectrum can be calculated::
@@ -212,11 +212,11 @@ For the parameters of the background the syntax is the same but the methods are
 The names of the peak parameters both in ties and constraints must include the index of the peak to which they belong. Here we follow
 the naming convention of the :ref:`func-CompositeFunction`: f<n>.<name>, where <n> stands for an integer index staring at 0 and <name>
 is the name of the parameter. For example, `f1.Sigma`, `f3.FWHM`. Because names now contain the period symbol '.' keyword arguments
-cannot be used. Instead we must pass strings containing ties::
+cannot be used. Instead we must pass a dictionary containing ties. The keys are parameter names and the values are the ties::
 
-    cf.peaks.ties('f2.FWHM=2*f1.FWHM', 'f3.FWHM=2*f2.FWHM')
+    cf.peaks.ties({'f2.FWHM': '2*f1.FWHM', 'f3.FWHM': '2*f2.FWHM'})
     
-and constraints are also a list of strings::
+Constraints are a list of strings::
 
     cf.peaks.constraints('f0.FWHM < 2.2', 'f1.FWHM >= 0.1')
     
@@ -226,13 +226,13 @@ If a parameter of all peaks needs to be tied/constrained with the same expressio
     cf.peaks.constrainAll('0 < Sigma < 0.1', 4)
 
 where the first argument is the general formula of the tie/constraint and the second is the number of peaks to apply to.
-The is also a version for a range of peak indices::
+There is also a version for a range of peak indices::
 
     cf.peaks.tieAll('Sigma=f0.Sigma', 1, 3)
 
 which is equivalent to::
 
-    cf.peaks.ties('f1.Sigma=f0.Sigma', 'f2.Sigma=f0.Sigma', 'f3.Sigma=f0.Sigma')
+    cf.peaks.ties({'f1.Sigma': 'f0.Sigma', 'f2.Sigma': 'f0.Sigma', 'f3.Sigma': 'f0.Sigma'})
 
 
 Setting Resolution Model
@@ -280,11 +280,11 @@ become lists. Here is an example of defining a `CrystalField` object with two sp
 
     cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                       Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-    cf.setPeaks('Lorentzian')
+    cf.PeakShape = 'Lorentzian'
     cf.peaks[0].param[0]['FWHM'] = 1.11
     cf.peaks[1].param[1]['FWHM'] = 1.12
-    cf.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                     background=Function('FlatBackground', A0=1.0))
+    cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
+                               background=Function('FlatBackground', A0=1.0))
     cf.background[1].peak.param['Sigma'] = 0.8
     cf.background[1].background.param['A0'] = 1.1
 
@@ -301,7 +301,7 @@ change::
     cf.background[1].peak.ties(Height=20.2)
     cf.background[1].peak.constraints('Sigma > 0.2')
     cf.peaks[1].tieAll('FWHM=2*f1.FWHM', 2, 5)
-    cf.peaks[0].constrainAll('FWHM < 2.2', 1, 6)
+    cf.peaks[0].constrainAll('FWHM < 2.2', 1, 4)
 
 The resolution model also needs to be initialised from a list::
 
@@ -328,6 +328,16 @@ To calculate a spectrum call the same method `getSpectrum` but pass the spectrum
   # Calculate first spectrum, use the i-th spectrum of a workspace
   sp = cf.getSpectrum(0, ws, i)
 
+Note that the attributes `Temperature`, `FWHM`, `peaks` and `background` may be set separately from the constructor, e.g.::
+
+    cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544)
+    cf.Temperature = [5, 50]
+
+Note, however, that whenever `Temperature` is set to a value that defines a different number of spectra than before
+(e.g. if `Temperature` was initially empty or `None` and is then defined as in the example above, or if it changes
+from a scalar value to a list or vice versa), all `Ties`, `Constraints`, `FWHM` and `peaks` parameters are cleared.
+Any crystal field parameters previously defined are retained.
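+
+A short sketch of this behaviour, using made-up parameter values and the methods shown earlier in this document::
+
+    cf = CrystalField('Ce', 'C2v', B20=0.37737, Temperature=44.0, FWHM=1.1)
+    cf.peaks.constraints('f0.FWHM < 2.2')
+    # Redefining Temperature so that it describes two spectra instead of one
+    # clears the peak constraints and FWHM, but keeps the field parameter B20.
+    cf.Temperature = [44.0, 50.0]
+    cf.FWHM = [1.1, 0.9]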
+
 
 Multiple Ions
 -------------
@@ -652,7 +662,11 @@ or separately after construction::
     fit_moment.fit()
 
 Unfortunately only 1D datasets can be fitted (e.g. M(H, T) cannot be fitted as a simultaneous function of field and
-temperature).
+temperature). Also note that setting the `PhysicalProperty` attribute after constructing the `CrystalField` object
+(e.g. running `cf.PhysicalProperty = PhysicalProperties('Cv')`) changes the number of datasets; this clears all
+`Ties` and `Constraints` previously set and resets all `FWHM` and `peaks` to their default values (zero for `FWHM`
+and `Lorentzian` for `peaks`).
+
 
 Simultaneous Fitting of Physical Properties and Inelastic Neutron Spectra
 -------------------------------------------------------------------------
diff --git a/docs/source/interfaces/ISIS_Reflectometry.rst b/docs/source/interfaces/ISIS_Reflectometry.rst
index a6c9c856ec1cb403481059cae9deff1ee53e72f5..b80084d90db8148244946ca86e0f35711f7ef844 100644
--- a/docs/source/interfaces/ISIS_Reflectometry.rst
+++ b/docs/source/interfaces/ISIS_Reflectometry.rst
@@ -392,21 +392,23 @@ Event Handling tab
 .. figure:: /images/ISISReflectometryPolref_event_handling_tab.png
    :alt: Showing view of the settings tab.
 
-The *Event Handling* tab can be used to analyze event workspaces. It contains three text boxes for
-specifying uniform even, uniform and custom slicing respectively. Each of these slicing options are
-exclusive, no more than one can be applied. If the text box for the selected slicing method is empty
-no event analysis will be performed, runs will be loaded using
+The *Event Handling* tab can be used to analyze event workspaces. It contains four text boxes for
+specifying uniform even, uniform, custom and log value slicing respectively. These slicing options
+are mutually exclusive; no more than one can be applied. If the text box for the selected slicing
+method is empty, no event analysis will be performed and runs will be loaded using
 :ref:`LoadISISNexus <algm-LoadISISNexus>` and analyzed as histogram workspaces. When this text box
 is not empty, runs will be loaded using :ref:`LoadEventNexus <algm-LoadEventNexus>` and the
-interface will try to parse the user input to obtain a set of start times and stop times. These
-define different time slices that will bepassed on to :ref:`FilterByTime <algm-FilterByTime>`. Each
-time slice will be normalized by the total proton charge and reduced as described in the previous
-section. Note that, if any of the runs in a group could not be loaded as an event workspace, the
-interface will load the runs within that group as histogram workspaces and no event analysis will
-be performed for that group. A warning message will be shown when the reduction is complete
-indicating that some groups could not be processed as event data.
-
-The three slicing options are described in more detail below:
+interface will try to parse the user input to obtain a set of start and stop values. These define
+different time slices that will be passed on to an appropriate filtering algorithm
+(:ref:`FilterByTime <algm-FilterByTime>` for uniform even, uniform and custom slicing,
+:ref:`FilterByLogValue <algm-FilterByLogValue>` for log value slicing). Each time slice will be
+normalized by the total proton charge and reduced as described in the previous section. Note that,
+if any of the runs in a group cannot be loaded as an event workspace, the interface will load
+the runs within that group as histogram workspaces and no event analysis will be performed for that
+group. A warning message will be shown when the reduction is complete indicating that some groups
+could not be processed as event data.
+
+The four slicing options are described in more detail below:
 
 - **Uniform Even** - The interface obtains the start and end times of the run and divides it into
   a specified number of evenly-sized slices. For example given a run of duration 100 seconds,
@@ -430,6 +432,14 @@ The three slicing options are described in more detail below:
     ``200`` seconds after the start of the run, and the second one starting at ``200`` seconds
     and ending at ``300`` seconds.
 
+- **LogValue** - Like custom slicing, this takes a list of comma-separated numbers, which are
+  parsed in the same manner as shown above. However, the values indicate the minimum and maximum
+  log values to filter on rather than times. In addition, this option takes a second entry,
+  'Log Name', which is the name of the log to filter the run by. For example, given a run and
+  entries of ``100, 200, 300`` and ``proton_charge`` for the slicing values and log name
+  respectively, two slices would be produced: the first containing all events with log values
+  between ``100`` and ``200``, the second with log values between ``200`` and ``300`` (see the
+  sketch below).
+
 Workspaces will be named according to the index of the slice, e.g ``IvsQ_13460_slice_0``, ``IvsQ_13460_slice_1``, etc.
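+
+Conceptually, log value slicing amounts to a loop over consecutive boundary pairs, each passed to
+:ref:`FilterByLogValue <algm-FilterByLogValue>`. A rough sketch of the ``100, 200, 300`` /
+``proton_charge`` example is shown below; the file name and output names are made up for
+illustration, and the interface additionally normalizes each slice by the total proton charge and
+reduces it as described above.
+
+.. code-block:: python
+
+    from mantid.simpleapi import LoadEventNexus, FilterByLogValue
+
+    ws = LoadEventNexus('INTER00013460.nxs')
+    boundaries = [100, 200, 300]
+    # One slice per consecutive pair of boundaries, keeping events whose log value lies in range
+    for i, (lo, hi) in enumerate(zip(boundaries[:-1], boundaries[1:])):
+        FilterByLogValue(InputWorkspace=ws, OutputWorkspace='slice_{}'.format(i),
+                         LogName='proton_charge', MinimumValue=lo, MaximumValue=hi)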
 
 Settings tab
diff --git a/docs/source/interfaces/Muon_Analysis.rst b/docs/source/interfaces/Muon_Analysis.rst
index cd0202371307d6ff776f2a25095fac4993f9eba9..4d75035bb40fa26624de914358fff03dcb366054 100644
--- a/docs/source/interfaces/Muon_Analysis.rst
+++ b/docs/source/interfaces/Muon_Analysis.rst
@@ -264,7 +264,8 @@ Data Analysis
 .. _DataAnalysis:
 
 This tab is designed for the user to make a fit against the data just plotted.
-Since Mantid 3.8, this tab has been enhanced to include fits of multiple datasets at once.
+Since Mantid 3.8 (upgraded in 3.10), this tab has been enhanced to include fits of multiple datasets at once.
+A transverse field (TF) asymmetry mode was added in Mantid 3.10.
 
 Default: multiple fitting disabled
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -281,13 +282,29 @@ When the tab is open, this fit property browser is used by default within Mantid
 Note that, in this mode, simultaneous fits are not possible.
 The intention is that this mode could be useful for users who are accustomed to the existing UI, or if a bug is found in the new UI.
 
+
+TF asymmetry enabled
+^^^^^^^^^^^^^^^^^^^^
+
+The TF asymmetry mode can be enabled by checking the "TF Asymmetry" checkbox on the Settings_ tab.
+At present it is not possible to use multiple fitting and TF asymmetry together, so both checkboxes
+cannot be selected at the same time. Loading transverse field asymmetry data into Muon Analysis will
+automatically enable TF asymmetry mode.
+When this mode is activated, the data analysis tab has two main differences from the pre-3.8 version. Firstly, there
+is an additional row in the Data table (normalization). Secondly, a "TF Asymmetry Fit" button is added to
+the fitting tab. Selecting this fitting option will call the :ref:`Calculate Muon Asymmetry <algm-CalculateMuonAsymmetry>` algorithm, with the user-defined function set to the composite function from the interface.
+
+.. image::  ../images/MuonAnalysisTFAsymm.png
+   :align: right
+
+
 Multiple fitting enabled
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-The new multiple fitting functionality can be enabled by checking the "Enable multiple fitting" checkbox on the Settings_ tab.
-When this is activated, the tab is divided into three sections vertically.
+The multiple fitting functionality can be enabled by checking the "Enable multiple fitting" checkbox on the Settings_ tab.
+When this is activated, the tab is divided into multiple sections vertically.
 
-.. image::  ../images/MuonAnalysisDataAnalysis3.8.png
+.. image::  ../images/MuonAnalysisDataAnalysis3.10.png
    :align: right
 
 Fit Function
@@ -306,36 +323,34 @@ This button will open the "Edit local parameter values" dialog, which offers gre
 
 Data
 ^^^^
-The central section of the tab is the data selector, which controls the dataset(s) that will be fitted.
+The next section of the tab is the data selector, which controls the dataset(s) that will be fitted.
 By default, this will be a single dataset, the same as the data loaded on the Home_ tab.
 The dataset(s) can be changed here and, if more than one is selected, they will all be fitted simultaneously.
 
+The "Display Parameters For" boxes consist of a backwards button, a drop-down selection and a forward button. The drop-down list shows all datasets currently selected,
+and the left and right buttons cycle through them. The currently selected dataset has its parameters shown in the *Fit Function* (upper) widget, and will be plotted.
+
 For a multi-dataset fit, the "Label" box is enabled.
 This allows the user to input a label for the simultaneous fit.
 
-The drop-down list shows all datasets currently selected, and the left and right buttons cycle through them.
-The currently selected dataset has its parameters shown in the *Fit Function* (upper) widget, and will be plotted.
-
-Fits can be done across runs, groups, periods or all three.
-From left to right, the options to select are:
-
 Runs
 """"
 A single run, or range (*e.g. 15189-91, 15193*) can be typed into the box here.
 The radio buttons below control whether the runs should be co-added together or fitted separately in a simultaneous fit.
-It is also possible to adjust the start and end time here.
 
-Groups
-""""""
-There is a checkbox in this section for each group defined in the GroupingOptions_ tab.
-One or multiple groups can be selected.
+Data Table
+^^^^^^^^^^
+
+The data table allows the user to modify the selected data for the fitting. This includes the start and end times, which can also
+be updated by dragging the blue dashed lines in the plot. The "Groups/Pairs to fit" box provides a drop-down menu with three options (all groups, all pairs and custom).
+Selecting custom will produce a pop-up box with tick boxes for each of the available groups and pairs. To update the custom selection, the
+Groups/Pairs button can be pressed in the ReselectData_ section at the bottom of the tab (this button is only enabled if a custom selection is set). The
+"Selected Groups" are displayed underneath.
 
-Periods
-"""""""
-There is a checkbox in this section for each period of the data.
-(This section is only visible for multi-period data).
-One or multiple periods can be selected.
-In addition, the "Combination" option can be used to fit a sum or difference of periods.
+The next row is the "Periods to fit" option, which is only displayed for multi-period data. This will automatically be populated with
+each of the periods (e.g. 1,2,3) and a custom option. Selecting custom will produce a pop-up with checkboxes for all of the periods
+and will also enable the "Periods" button in the ReselectData_ section;
+pressing this button allows the user to alter the custom selection.
 
 Examples/Use cases
 """"""""""""""""""
@@ -372,12 +387,27 @@ Examples/Use cases
    - It is, of course, possible to select several runs, groups, periods all at once and a simultaneous fit will be performed across all the selected datasets.
    - Example: MUSR{15189, 15190, 15191}, groups {*fwd*, *bwd*}, periods {1, 2}: 12 datasets in all.
 
-Options
-^^^^^^^
-The bottom of the tab contains selected fit options that can be adjusted, just as elsewhere in Mantid.
+Additional Options
+^^^^^^^^^^^^^^^^^^
+The section near the bottom of the tab contains selected fit options that can be adjusted, just as elsewhere in Mantid.
 The only option specific to the Muon Analysis interface is *Fit To Raw Data*.
 When this option is set to *True*, the fitting process is done using the raw (unbinned) data, even if the DataBinning_ is set.
 
+Reselect data
+^^^^^^^^^^^^^
+
+.. _ReselectData:
+
+At the bottom of the tab is the "Reselect Data" section. This includes three buttons: "Groups/Pairs", "Periods" and "Combine Periods". The "Groups/Pairs" and "Periods"
+buttons are only enabled when the relevant options in the data table are set to custom. Pressing either button will produce a pop-up that allows the user to modify their selection.
+
+The "Combine Periods" button is only enabled if multiple periods are available. Pressing the button will generate a pop-up with two boxes. The top box is for adding periods
+(as a comma-separated list or with "+") and the bottom box is for subtraction (as a comma-separated list). The entries in the top and bottom boxes are each summed separately
+and the results are then used in the subtraction.
+
+.. image::  ../images/MuonAnalysisCombinePeriods.png
+   :align: right
+
 Sequential fitting
 ^^^^^^^^^^^^^^^^^^
 
diff --git a/docs/source/release/v3.10.0/diffraction.rst b/docs/source/release/v3.10.0/diffraction.rst
index 56c08737ec7691b9f12c8f42a8b6cbd1af0df776..4ef6cd79a7675e889905e31e2a3bf426d9f5d563 100644
--- a/docs/source/release/v3.10.0/diffraction.rst
+++ b/docs/source/release/v3.10.0/diffraction.rst
@@ -15,6 +15,7 @@ Crystal Improvements
  - :ref:`SaveIsawPeaks <algm-SaveIsawPeaks>` now saves the calibration data for all detector banks in instrument so the header may be longer
  - :ref:`LoadIsawPeaks <algm-LoadIsawPeaks>` now uses the calibration lines to calibrate the detectors banks for CORELLI
  - :ref:SCD Event Data Reduction interface and SCD_Reduction python scripts work with both nxs and h5 extensions for data file.
+ - :ref:`FindSXPeaks <algm-FindSXPeaks>` no longer fails on instruments with multiple detectors per spectrum.
 
 Engineering Diffraction
 -----------------------
@@ -23,7 +24,7 @@ Powder Diffraction
 ------------------
 
 - :ref:`AlignAndFocusPowder <algm-AlignAndFocusPowder>` Now supports supplying an a second ``.cal`` file for the ``GroupingFilename``.
-- New algorithm :ref:`AlignAndFocusPowderFromFiles <algm-AlignAndFocusPowderFromFiles>` is a wrapper around :ref:`AlignAndFocusPowder <algm-AlignAndFocusPowder>` which supports caching results
+- New algorithm :ref:`AlignAndFocusPowderFromFiles <algm-AlignAndFocusPowderFromFiles>` is a wrapper around :ref:`AlignAndFocusPowder <algm-AlignAndFocusPowder>` which supports caching results. :ref:`SNSPowderReduction <algm-SNSPowderReduction>` and :ref:`PDToPDFgetN <algm-PDToPDFgetN>` have been reworked to take advantage of this.
 - Bugfix in :ref:`SNAPReduce <algm-SNAPReduce>` with loading previous normalizations
 - :ref:`SNSPowderReduction <algm-SNSPowderReduction>` now supports splitters in format of ``MatrixWorkspace`` and general ``TableWorkspace``.
 - A new NOMAD instrument definition file with corrected values.
@@ -33,9 +34,9 @@ Single Crystal Diffraction
 
 - A new HB3A instrument definition file, for its 512 x 512 detector, is created.  Its valid period is from February 2017 to late April 2017.
 - An IDF for HB3A with 256 by 256 detectors was created.  It was dated from late April 2017 because its original detector has been switched back.
+- The WISH instrument parameter file has been fixed to prevent predicted peaks from falling into the gaps between tubes.
 - New algorithm :ref:`DeltaPDF3D <algm-DeltaPDF3D>` for calculating the 3D-deltaPDF from a HKL MDHistoWorkspace
 
-
 Full list of `diffraction <https://github.com/mantidproject/mantid/issues?q=is%3Aclosed+milestone%3A%22Release+3.10%22+label%3A%22Component%3A+Diffraction%22>`_
 and
 `imaging <https://github.com/mantidproject/mantid/issues?q=is%3Aclosed+milestone%3A%22Release+3.10%22+label%3A%22Component%3A+Imaging%22>`_ changes on GitHub.
diff --git a/docs/source/release/v3.10.0/indirect_inelastic.rst b/docs/source/release/v3.10.0/indirect_inelastic.rst
index 3b7dc52115c4719f0cbd57608c9fd9b65bd6d08e..a9f39f7b5fe2bba3ffd316664b37bbf9ebba3764 100644
--- a/docs/source/release/v3.10.0/indirect_inelastic.rst
+++ b/docs/source/release/v3.10.0/indirect_inelastic.rst
@@ -55,6 +55,10 @@ Improvements
 - OSIRIS diffraction now rebins container workspaces to match the sample workspace
 - :ref:`ISISIndirectDiffractionReduction <algm-ISISIndirectDiffractionReduction>` now fully supports VESUVIO data
 - Inelastic pixel ID's in BASIS instrument definition file grouped into continuous physical pixels.
+- Reduced number of workspaces produced by VESUVIO scripts
+- Added SortXAxis to Bayes Quasi and Stretch
+- Error bars are no longer shown by default
+
 
 
 Bugfixes
@@ -64,5 +68,6 @@ Bugfixes
 - *Abins*:  fix setting very small off-diagonal elements of b tensors
 - Fix errors from calling Rebin from VisionReduction.
 - Fixed validation of inputs in *CalculatePaalmanPings*
+- IN16_Definition.xml has been updated with a Monitor ID change from 19 to 29 to fix a duplicate identity issue
 
 `Full list of changes on GitHub <http://github.com/mantidproject/mantid/pulls?q=is%3Apr+milestone%3A%22Release+3.10%22+is%3Amerged+label%3A%22Component%3A+Indirect+Inelastic%22>`_
diff --git a/docs/source/release/v3.10.0/muon.rst b/docs/source/release/v3.10.0/muon.rst
index 80f4cefe6d7efa7374fde89ba261d9129a004b82..5427a663a81574639a921361e2c32e07aec9271e 100644
--- a/docs/source/release/v3.10.0/muon.rst
+++ b/docs/source/release/v3.10.0/muon.rst
@@ -9,7 +9,7 @@ Interfaces
 ----------
 Muon Analysis
 -  The new algorithms :ref:`EstimateMuonAsymmetryFromCounts <algm-EstimateMuonAsymmetryFromCounts-v1>`: and :ref:`CalculateMuonAsymmetry <algm-CalculateMuonAsymmetry-v1>` are now used in the muon analysis GUI.
-
+-  The main part of the multiple fitting GUI has been upgraded to be more user-friendly.
 
 
 - Fixed a bug that meant transverse field asymmetry data was normalized to bin width. 
diff --git a/docs/source/release/v3.10.0/reflectometry.rst b/docs/source/release/v3.10.0/reflectometry.rst
index 3aa96ddd641ff9f6df311dc3b48e300cd5689693..5a2ef9d48b4380b8d11feb0f55aea5ffc3f6f1b3 100644
--- a/docs/source/release/v3.10.0/reflectometry.rst
+++ b/docs/source/release/v3.10.0/reflectometry.rst
@@ -12,10 +12,12 @@ Algorithms
 
 - :ref:`algm-SpecularReflectionPositionCorrect` - fixed a bug where entering
   an invalid detector or sample name would cause a segmentation fault.
-- The :ref:`algm-SpecularReflectionPositionCorrect` algorithm has a new property, ``DetectorCorrectionType``,
+- The :ref:`algm-SpecularReflectionPositionCorrect` algorithm has a new property, ``DetectorCorrectionType``, 
   which specifies whether detector positions should be corrected by a vertical  shift (default) or by a rotation around the sample position.
 - :ref:`algm-ReflectometryReductionOneAuto-v2` and :ref:`algm-CreateTransmissionWorkspaceAuto-v2` attempts to populate properties `StartOverlap` and `EndOverlap` with values from the IDF.
 - :ref:`algm-GroupDetectors-v2` peforms a more resilient validation of grouping pattern that is less likely to throw an exception.
+- :ref:`algm-ReflectometryReductionOneAuto-v2` - fixed a bug where processing instructions were not applied correctly to the specified transmission run.
+- :ref:`algm-ReflectometryReductionOne-v2` and :ref:`algm-ReflectometryReductionOneAuto-v2` have a new property, ``SummationType``, which specifies whether summation should be done in wavelength (default) or in Q. For summation in Q, there is an additional new property, ``ReductionType``, which should be used to specify whether the reduction is for a divergent beam or non-flat sample.
 
 ConvertToReflectometryQ
 -----------------------
@@ -39,8 +41,8 @@ ISIS Reflectometry
   - Ctrl+X copies the selected row(s) to the clipboard and deletes them.
 
 - A brief description about the columns in the table can be now accessed by using the *What's this* tool (last tool in the toolbar) and clicking on the column headers.
-- Added two more time slicing options in the 'Event Handling' tab for analysing event data - Uniform Even and Uniform slicing.
-- For custom slicing (and new slicing options), workspace slices are now identified by an index (e.g. ws_slice_0) instead of a start/stop time.
+- Added three more time slicing options in the 'Event Handling' tab for analysing event data - Uniform Even, Uniform and Log Value slicing.
+- For custom slicing (and new slicing options), workspace slices are now identified by an index (e.g. ws_slice_0) instead of a start/stop value.
 - The 'Get Defaults` button for 'Experiment Settings' in the 'Settings' tab now populates `StartOverlap` and `EndOverlap` text boxes with values from the IDF.
 
 ISIS Reflectometry (Old)
diff --git a/docs/source/release/v3.10.0/sans.rst b/docs/source/release/v3.10.0/sans.rst
index ac6e98c873e7093c56f88a353d3d7c20dab02e3f..cf014961138e23f2b454974dfa008cb18dfa4d6a 100644
--- a/docs/source/release/v3.10.0/sans.rst
+++ b/docs/source/release/v3.10.0/sans.rst
@@ -13,5 +13,7 @@ Bug Fixes
 - Fixed LOQ Batch mode bug where custom user file without a .txt ending was not being picked up.
 - Fixed Batch mode bug where the output name suffix was hardcoded to SANS2D. It now takes the individual instruments into account.
 - Fixed LOQ bug where prompt peak was not set correctly for monitor normalisation.
+- Fixed Batch mode bug where merged reductions set in the GUI were not respected.
+- Fixed display of current IDF, which was not updated when operating in batch mode.
 
 `Full list of changes on github <http://github.com/mantidproject/mantid/pulls?q=is%3Apr+milestone%3A%22Release+3.10%22+is%3Amerged+label%3A%22Component%3A+SANS%22>`__
diff --git a/docs/source/release/v3.10.0/ui.rst b/docs/source/release/v3.10.0/ui.rst
index c18ec9909f5d519acfd81cc42124e3cdec233702..b75bf1c6b1f8a45ba5d99f5735e69af11666b655 100644
--- a/docs/source/release/v3.10.0/ui.rst
+++ b/docs/source/release/v3.10.0/ui.rst
@@ -26,15 +26,32 @@ User Interface
 
 Instrument View
 ###############
- - Added the ability to visualise peaks generated by :ref:`algm-PredictPeaks` which fall off detectors.
- - Added the ability to zoom out on an unwrapped view.
- - Fixed a bug preventing the some of the banks from being visible when using a U correction.
- - Fixed a bug where pressing delete would delete a workspace even when the dock was not focused.
- - Fixed a bug where the user would not be prompted before deleting workspaces even if confirmations were turned on.
+
+- Added the ability to visualise peaks generated by :ref:`algm-PredictPeaks` which fall off detectors.
+- Added the ability to zoom out on an unwrapped view.
+- Fixed a bug preventing some of the banks from being visible when using a U correction.
+- Fixed a bug where pressing delete would delete a workspace even when the dock was not focused.
+- Fixed a bug where the user would not be prompted before deleting workspaces even if confirmations were turned on.
 
 Plotting Improvements
 #####################
 
+- Surface, Contour, Waterfall, 1D and Tiled plotting of workspaces are now available from a single dialog box (Plot Advanced), accessed from the right-click menu of a workspace selection.
+- The log value facilities for Surface and Contour plots are now available for Waterfall and 1D plots, where they appear in the legend.
+
+.. figure:: ../../images/ArtRightGUIWaterfallCustom2sp1.PNG
+
+Here are a couple of plots with "Temp" selected as the log:
+
+.. figure:: ../../images/ArtWaterfallT1.PNG
+
+.. figure:: ../../images/ArtSurfacePlotT1.PNG
+
+More details can be found `here <https://www.mantidproject.org/MBC_Displaying_data_in_multiple_workspaces>`_.
+
+- Curves where all(Y) <= 0 are no longer plotted when the Y-scale is set to logarithmic.
+  The previous behaviour assigned an arbitrary value of 0.1, which was confusing.
+
 Algorithm Toolbox
 #################
 
@@ -63,12 +80,17 @@ Bugs Resolved
 
 - Fixed an issue in the Script Window that caused the Convert Tabs to Spaces and vice versa operations to corrupt the script.
 - Fixed an issue where some graphs not associated with a workspace would not be shown in the project save as view.
+- Fixed an issue where the Spectrum Viewer could crash when a workspace contained infinities.
+- Fixed an issue where contour lines were displayed at the wrong location.
+
 
 SliceViewer Improvements
 ------------------------
 - Fixed a bug where the rebin button was toggled when the user switch axes.
 - Changed zoom level on peak. Now when zooming onto a spherical or ellipsoidal peak, the entire peak is visible when using the default window size.
 - Fixed a bug where swapping the dimensions did not rebin the workspace despite having autorebin enabled.
+- Fixed a bug where swapping the dimensions did not draw the axis scale correctly.
+
 
 VSI Improvments
 ---------------
@@ -76,6 +98,7 @@ VSI Improvments
 - The mapped array vtkMDHWSignalArray has been refactored to use the new vtkGenericDataArray class template. This interface minimizes virtual indirection and allows advanced compiler optimizations such as vectorization.
 - Minimize the number of times the workspace min and max values are calculated.
 - Threshold filter now reports progress to the user.
+- Added an option to automatically choose a contrasting color for the axes grid and colorbar.
 - Camera toolbar snaps to views along crystallographic axes,
 
 |
diff --git a/instrument/DNS_Definition_PAonly.xml b/instrument/DNS_Definition_PAonly.xml
index 15fab5107e83e6003b9de06370116d574f8369eb..3cfa615ef75bf0c443ef327bb522c2c331344978 100644
--- a/instrument/DNS_Definition_PAonly.xml
+++ b/instrument/DNS_Definition_PAonly.xml
@@ -16,9 +16,9 @@
       <handedness val="right" />
     </reference-frame>
   </defaults>
-  <!-- moderator -->
+  <!-- chopper -->
   <component type="moderator">
-    <location z="-2.27" />
+    <location z="-0.36325" />
   </component>
   <type name="moderator" is="Source"></type>
   <!-- monitor -->
diff --git a/instrument/DNS_Parameters.xml b/instrument/DNS_Parameters.xml
new file mode 100644
index 0000000000000000000000000000000000000000..f5db344a2b75f0999fd5b13ae306c86f69058a23
--- /dev/null
+++ b/instrument/DNS_Parameters.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<parameter-file instrument="DNS" valid-from="2013-10-01T00:00:00">
+
+        <component-link name="DNS">
+
+		<parameter name="deltaE-mode" type="string">
+			<value val="direct" />
+		</parameter>
+
+                <!-- Coil currents Ca, Cb, Cc, Cz to determine the neutron polarisation -->
+                <parameter name="x_currents" type="string">
+                        <value val="0,-2,-0.77,-2.21; -0.5,-1.5,-1.2,-2.15" />
+		</parameter>
+                <parameter name="y_currents" type="string">
+                        <value val="0,1.60,-2.77,-2.21; 0,-1.4,1.65,-2.15" />
+                </parameter>
+                <parameter name="z_currents" type="string">
+                        <value val="0,0.11,-0.5,0; 0,0.15,-0.5,0" />
+                </parameter>
+
+                <!-- Scaling factor to calculate the channel width in microseconds -->
+                <parameter name="channel_width_factor" type="string">
+                        <value val="20.0" />
+                </parameter>
+
+                <!-- 2theta tolerance, degrees -->
+                <parameter name="two_theta_tolerance" type="string">
+                        <value val="0.1" />
+                </parameter>
+
+                <!-- file suffixes to lookup for the standard data -->
+                <!-- Vanadium -->
+                <parameter name="vana" type="string">
+                        <value val="vana"/>
+                </parameter>
+                <!-- NiCr -->
+                <parameter name="nicr" type="string">
+                        <value val="nicr"/>
+                </parameter>
+                <!-- Instrument background -->
+                <parameter name="bkg" type="string">
+                        <value val="leer"/>
+                </parameter>
+
+                <!-- Normalization workspace name suffix -->
+                <parameter name="normws_suffix" type="string">
+                        <value val="_n"/>
+                </parameter>
+
+                <!-- formula for Monitor efficiency calculation. Algorithm: MonitorEfficiencyCorUser  -->
+                <parameter name="formula_mon_eff" type="string">
+                        <value val="sqrt(e/25.3)" />
+                </parameter>
+
+                <!-- Distance [m] between sample and equatorial line of the detector. Mandatory
+                        if you want to correct the flight paths. -->
+                <parameter name="l2" type="string">
+                        <value val="0.80" />
+                </parameter>
+
+	</component-link>
+
+</parameter-file>
diff --git a/instrument/EXED_Definition.xml b/instrument/EXED_Definition.xml
index d8ac3a8b8fb9ace1b8a329b832f08273402dd91d..127c8baca996647af0ce2af6ec0259c5e31e32aa 100644
--- a/instrument/EXED_Definition.xml
+++ b/instrument/EXED_Definition.xml
@@ -22,7 +22,7 @@ last-modified="2017-04-21 14:58:37">
 </defaults>
 
 <!-- Detector components -->
-<component type="monitors" idlist="monitors"><location/></component>
+<!--<component type="monitors" idlist="monitors"><location/></component>-->
 <component type="panel02" idlist="panel02"><location y="-0.447"/></component>
 <component type="panel04" idlist="panel04"><location y="-0.4466"/></component>
 <component type="Tank">
diff --git a/instrument/IN16_Definition.xml b/instrument/IN16_Definition.xml
index 683e89cfa5591f687757d038d5e82a50a670a6c6..f01387c9644b6fb6605e46a18694bf64ba7a90b8 100644
--- a/instrument/IN16_Definition.xml
+++ b/instrument/IN16_Definition.xml
@@ -66,7 +66,7 @@
   </type>
 
   <idlist idname="monitor1">
-    <id val="19" />
+    <id val="29" />
   </idlist>
 
 <!--  detector components -->
diff --git a/instrument/WISH_Parameters.xml b/instrument/WISH_Parameters.xml
index 09c45dbaae21347dca31254a89122318df3a64ca..23d27cd0023c0120b8686697d6a6cb704ec3eb81 100644
--- a/instrument/WISH_Parameters.xml
+++ b/instrument/WISH_Parameters.xml
@@ -3,20 +3,25 @@
     
     <component-link name="WISH">
     
+        <!-- Specify the gap between the tubes for Peak::findDetector -->
+        <parameter name="tube-gap">
+            <value val="0.00017"/>
+        </parameter>
+
         <!-- SplitInto defaults for MDWorkspaces -->
-		<parameter name="SplitInto">
-		  <value val="2"/>
-		</parameter>
-		
-		<!-- SplitThreshold defaults for MDWorkspaces -->
-		<parameter name="SplitThreshold">
-		  <value val="30"/>
-		</parameter>
-		
-		 <!-- MaxRecursionDepth defaults for MDWorkspaces -->
-		<parameter name="MaxRecursionDepth">
-		  <value val="20"/>
-		</parameter>
+        <parameter name="SplitInto">
+            <value val="2"/>
+        </parameter>
+
+        <!-- SplitThreshold defaults for MDWorkspaces -->
+        <parameter name="SplitThreshold">
+            <value val="30"/>
+        </parameter>
+
+        <!-- MaxRecursionDepth defaults for MDWorkspaces -->
+        <parameter name="MaxRecursionDepth">
+            <value val="20"/>
+        </parameter>
         
          <!-- Offset the psi values in the mini-plot -->
          <parameter name="offset-phi" type="string">
diff --git a/scripts/Diffraction/isis_powder/abstract_inst.py b/scripts/Diffraction/isis_powder/abstract_inst.py
index 6142e6047763db6429362c75f04b8e750ee4eda1..e4631eb8dfaa22577bb3e5fb912f4f3c380345ca 100644
--- a/scripts/Diffraction/isis_powder/abstract_inst.py
+++ b/scripts/Diffraction/isis_powder/abstract_inst.py
@@ -159,6 +159,15 @@ class AbstractInst(object):
         """
         return None
 
+    def _get_instrument_bin_widths(self):
+        """
+        Returns the bin widths to rebin the focused workspace to. If the
+        instrument does not require this step it should return None and
+        the workspace will not be rebinned.
+        :return: List of bin widths, or None if no rebinning should take place
+        """
+        return None
+
     def _generate_auto_vanadium_calibration(self, run_details):
         """
         Used by focus if a vanadium spline was not found to automatically generate said spline if the instrument
@@ -217,6 +226,13 @@ class AbstractInst(object):
 
     # Steps applicable to all instruments
 
+    @staticmethod
+    def _generate_run_details_fingerprint(*args):
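+        # Build a simple cache key by concatenating the string form of each argument
+        # (e.g. the run number string and the file extension)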
+        out_key = ""
+        for arg in args:
+            out_key += str(arg)
+        return out_key
+
     def _generate_out_file_paths(self, run_details):
         """
         Generates the various output paths and file names to be used during saving or as workspace names
@@ -226,6 +242,9 @@ class AbstractInst(object):
         output_directory = os.path.join(self._output_dir, run_details.label, self._user_name)
         output_directory = os.path.abspath(os.path.expanduser(output_directory))
         file_name = str(self._generate_output_file_name(run_number_string=run_details.output_run_string))
+        # Prepend the file extension, if one was set, so the files group nicely in the file browser
+        # Also strip the leading dot so we don't create hidden files on *nix systems
+        file_name = run_details.file_extension[1:] + file_name if run_details.file_extension else file_name
         nxs_file = os.path.join(output_directory, (file_name + ".nxs"))
         gss_file = os.path.join(output_directory, (file_name + ".gsas"))
         tof_xye_file = os.path.join(output_directory, (file_name + "_tof_xye.dat"))
diff --git a/scripts/Diffraction/isis_powder/gem.py b/scripts/Diffraction/isis_powder/gem.py
index 98af04e1010698009bb782baff964c1100f067ff..56ded044d800e6c49d102785eb6ec8697dfae47d 100644
--- a/scripts/Diffraction/isis_powder/gem.py
+++ b/scripts/Diffraction/isis_powder/gem.py
@@ -2,23 +2,20 @@ from __future__ import (absolute_import, division, print_function)
 
 from isis_powder.abstract_inst import AbstractInst
 from isis_powder.gem_routines import gem_advanced_config, gem_algs, gem_param_mapping
-from isis_powder.routines import common, instrument_settings, yaml_parser
+from isis_powder.routines import common, instrument_settings
 
 
 class Gem(AbstractInst):
     def __init__(self, **kwargs):
-        basic_config_dict = yaml_parser.open_yaml_file_as_dictionary(kwargs.get("config_file", None))
-
         self._inst_settings = instrument_settings.InstrumentSettings(
             param_map=gem_param_mapping.attr_mapping, adv_conf_dict=gem_advanced_config.get_all_adv_variables(),
-            kwargs=kwargs, basic_conf_dict=basic_config_dict)
+            kwargs=kwargs)
 
         super(Gem, self).__init__(user_name=self._inst_settings.user_name,
                                   calibration_dir=self._inst_settings.calibration_dir,
                                   output_dir=self._inst_settings.output_dir, inst_prefix="GEM")
 
-        self._cached_run_details = None
-        self._cached_run_number = None
+        self._cached_run_details = {}
 
     def focus(self, **kwargs):
         self._inst_settings.update_attributes(kwargs=kwargs)
@@ -32,8 +29,14 @@ class Gem(AbstractInst):
                                      do_absorb_corrections=self._inst_settings.do_absorb_corrections)
 
     def _get_run_details(self, run_number_string):
-        return gem_algs.get_run_details(run_number_string=run_number_string, inst_settings=self._inst_settings,
-                                        is_vanadium_run=self._is_vanadium)
+        run_number_string_key = self._generate_run_details_fingerprint(run_number_string,
+                                                                       self._inst_settings.file_extension)
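+        # Cache the run details object keyed on both the run string and the file extension,
+        # so changing either triggers a fresh lookup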
+        if run_number_string_key in self._cached_run_details:
+            return self._cached_run_details[run_number_string_key]
+
+        self._cached_run_details[run_number_string_key] = gem_algs.get_run_details(
+            run_number_string=run_number_string, inst_settings=self._inst_settings, is_vanadium_run=self._is_vanadium)
+        return self._cached_run_details[run_number_string_key]
 
     def _generate_auto_vanadium_calibration(self, run_details):
         raise NotImplementedError()
diff --git a/scripts/Diffraction/isis_powder/gem_routines/Examples/Gem_Mapping.yaml b/scripts/Diffraction/isis_powder/gem_routines/Examples/gem_cycle_map_example.yaml
similarity index 100%
rename from scripts/Diffraction/isis_powder/gem_routines/Examples/Gem_Mapping.yaml
rename to scripts/Diffraction/isis_powder/gem_routines/Examples/gem_cycle_map_example.yaml
diff --git a/scripts/Diffraction/isis_powder/gem_routines/gem_param_mapping.py b/scripts/Diffraction/isis_powder/gem_routines/gem_param_mapping.py
index 0b6cdb07213f52e7d9062c596880d0f20acf2849..fafb1924da8ba3877ff798fbd49c36b63f41ed3a 100644
--- a/scripts/Diffraction/isis_powder/gem_routines/gem_param_mapping.py
+++ b/scripts/Diffraction/isis_powder/gem_routines/gem_param_mapping.py
@@ -11,13 +11,14 @@ attr_mapping = \
      ParamMapEntry(ext_name="calibration_mapping_file",  int_name="cal_mapping_path"),
      ParamMapEntry(ext_name="config_file",               int_name="config_file"),
      ParamMapEntry(ext_name="do_absorb_corrections",     int_name="do_absorb_corrections"),
+     ParamMapEntry(ext_name="file_ext",                  int_name="file_extension", optional=True),
+     ParamMapEntry(ext_name="first_cycle_run_no",        int_name="run_in_range"),
      ParamMapEntry(ext_name="focused_cropping_values",   int_name="focused_cropping_values"),
      ParamMapEntry(ext_name="grouping_file_name",        int_name="grouping_file_name"),
      ParamMapEntry(ext_name="input_mode",                int_name="input_batching", enum_class=INPUT_BATCHING),
      ParamMapEntry(ext_name="mode",                      int_name="mode",           enum_class=GEM_CHOPPER_MODES),
      ParamMapEntry(ext_name="multiple_scattering",       int_name="multiple_scattering"),
      ParamMapEntry(ext_name="raw_tof_cropping_values",   int_name="raw_tof_cropping_values"),
-     ParamMapEntry(ext_name="run_in_range",              int_name="run_in_range"),
      ParamMapEntry(ext_name="run_number",                int_name="run_number"),
      ParamMapEntry(ext_name="sample_empty",              int_name="sample_empty",   optional=True),
      ParamMapEntry(ext_name="spline_coefficient",        int_name="spline_coeff"),
diff --git a/scripts/Diffraction/isis_powder/pearl.py b/scripts/Diffraction/isis_powder/pearl.py
index 1610c2a3bbb2116eb272867f58c7c38ab74c485d..c70774104fc43904c4b2561ef5d000f58b65d13a 100644
--- a/scripts/Diffraction/isis_powder/pearl.py
+++ b/scripts/Diffraction/isis_powder/pearl.py
@@ -2,7 +2,7 @@ from __future__ import (absolute_import, division, print_function)
 
 import mantid.simpleapi as mantid
 
-from isis_powder.routines import common, instrument_settings, yaml_parser
+from isis_powder.routines import common, instrument_settings
 from isis_powder.abstract_inst import AbstractInst
 from isis_powder.pearl_routines import pearl_algs, pearl_output, pearl_advanced_config, pearl_param_mapping
 
@@ -10,18 +10,15 @@ from isis_powder.pearl_routines import pearl_algs, pearl_output, pearl_advanced_
 class Pearl(AbstractInst):
 
     def __init__(self, **kwargs):
-        basic_config_dict = yaml_parser.open_yaml_file_as_dictionary(kwargs.get("config_file", None))
-
         self._inst_settings = instrument_settings.InstrumentSettings(
            param_map=pearl_param_mapping.attr_mapping, adv_conf_dict=pearl_advanced_config.get_all_adv_variables(),
-           basic_conf_dict=basic_config_dict, kwargs=kwargs)
+           kwargs=kwargs)
 
         super(Pearl, self).__init__(user_name=self._inst_settings.user_name,
                                     calibration_dir=self._inst_settings.calibration_dir,
                                     output_dir=self._inst_settings.output_dir, inst_prefix="PEARL")
 
-        self._cached_run_details = None
-        self._cached_run_details_number = None
+        self._cached_run_details = {}
 
     def focus(self, **kwargs):
         self._switch_long_mode_inst_settings(kwargs.get("long_mode"))
@@ -47,16 +44,14 @@ class Pearl(AbstractInst):
                                      do_absorb_corrections=self._inst_settings.absorb_corrections)
 
     def _get_run_details(self, run_number_string):
-        if self._cached_run_details_number == run_number_string:
-            return self._cached_run_details
-
-        run_details = pearl_algs.get_run_details(run_number_string=run_number_string,
-                                                 inst_settings=self._inst_settings,
-                                                 is_vanadium_run=self._is_vanadium)
-
-        self._cached_run_details_number = run_number_string
-        self._cached_run_details = run_details
-        return run_details
+        run_number_string_key = self._generate_run_details_fingerprint(run_number_string,
+                                                                       self._inst_settings.file_extension)
+        if run_number_string_key in self._cached_run_details:
+            return self._cached_run_details[run_number_string_key]
+
+        self._cached_run_details[run_number_string_key] = pearl_algs.get_run_details(
+            run_number_string=run_number_string, inst_settings=self._inst_settings, is_vanadium_run=self._is_vanadium)
+        return self._cached_run_details[run_number_string_key]
 
     # Params #
 
diff --git a/scripts/Diffraction/isis_powder/pearl_routines/Examples/pearl_calibration.yaml b/scripts/Diffraction/isis_powder/pearl_routines/Examples/pearl_cycle_map_example.yaml
similarity index 100%
rename from scripts/Diffraction/isis_powder/pearl_routines/Examples/pearl_calibration.yaml
rename to scripts/Diffraction/isis_powder/pearl_routines/Examples/pearl_cycle_map_example.yaml
diff --git a/scripts/Diffraction/isis_powder/pearl_routines/pearl_param_mapping.py b/scripts/Diffraction/isis_powder/pearl_routines/pearl_param_mapping.py
index 1e07be4b10fd3a44fc302ece4c0e051e1041f216..f2e2b408a32ad7d15bdad658e9425d6f4c2fcbca 100644
--- a/scripts/Diffraction/isis_powder/pearl_routines/pearl_param_mapping.py
+++ b/scripts/Diffraction/isis_powder/pearl_routines/pearl_param_mapping.py
@@ -11,6 +11,7 @@ attr_mapping = \
         ParamMapEntry(ext_name="calibration_config_path",    int_name="cal_mapping_path"),
         ParamMapEntry(ext_name="calibration_directory",      int_name="calibration_dir"),
         ParamMapEntry(ext_name="do_absorb_corrections",      int_name="absorb_corrections"),
+        ParamMapEntry(ext_name="file_ext",                   int_name="file_extension", optional=True),
         ParamMapEntry(ext_name="focus_mode",                 int_name="focus_mode", enum_class=PEARL_FOCUS_MODES),
         ParamMapEntry(ext_name="long_mode",                  int_name="long_mode"),
         ParamMapEntry(ext_name="monitor_lambda_crop_range",  int_name="monitor_lambda"),
diff --git a/scripts/Diffraction/isis_powder/polaris.py b/scripts/Diffraction/isis_powder/polaris.py
index 510898ed047896ba58d0b9fbf52872d4ec79b040..7c3d58281bac9959bb1fb5e5cbea6aab2cba3cb1 100644
--- a/scripts/Diffraction/isis_powder/polaris.py
+++ b/scripts/Diffraction/isis_powder/polaris.py
@@ -2,27 +2,23 @@ from __future__ import (absolute_import, division, print_function)
 
 import os
 
-from isis_powder.routines import common, instrument_settings, yaml_parser
+from isis_powder.routines import common, instrument_settings
 from isis_powder.abstract_inst import AbstractInst
 from isis_powder.polaris_routines import polaris_advanced_config, polaris_algs, polaris_param_mapping
 
 
 class Polaris(AbstractInst):
     def __init__(self, **kwargs):
-        basic_config_dict = yaml_parser.open_yaml_file_as_dictionary(kwargs.get("config_file", None))
         self._inst_settings = instrument_settings.InstrumentSettings(
             param_map=polaris_param_mapping.attr_mapping, adv_conf_dict=polaris_advanced_config.variables,
-            basic_conf_dict=basic_config_dict, kwargs=kwargs)
+            kwargs=kwargs)
 
         super(Polaris, self).__init__(user_name=self._inst_settings.user_name,
                                       calibration_dir=self._inst_settings.calibration_dir,
                                       output_dir=self._inst_settings.output_dir, inst_prefix="POL")
 
         # Hold the last dictionary later to avoid us having to keep parsing the YAML
-        self._run_details_last_run_number = None
-        self._run_details_cached_obj = None
-
-        self._ads_workaround = 0
+        self._run_details_cached_obj = {}
 
     def focus(self, **kwargs):
         self._inst_settings.update_attributes(kwargs=kwargs)
@@ -89,18 +85,19 @@ class Polaris(AbstractInst):
     def _get_input_batching_mode(self):
         return self._inst_settings.input_mode
 
-    def _get_run_details(self, run_number_string):
-        if self._run_details_last_run_number == run_number_string:
-            return self._run_details_cached_obj
+    def _get_instrument_bin_widths(self):
+        return self._inst_settings.focused_bin_widths
 
-        run_details = polaris_algs.get_run_details(run_number_string=run_number_string,
-                                                   inst_settings=self._inst_settings, is_vanadium_run=self._is_vanadium)
+    def _get_run_details(self, run_number_string):
+        run_number_string_key = self._generate_run_details_fingerprint(run_number_string,
+                                                                       self._inst_settings.file_extension)
+        if run_number_string_key in self._run_details_cached_obj:
+            return self._run_details_cached_obj[run_number_string_key]
 
-        # Hold obj in case same run range is requested
-        self._run_details_last_run_number = run_number_string
-        self._run_details_cached_obj = run_details
+        self._run_details_cached_obj[run_number_string_key] = polaris_algs.get_run_details(
+            run_number_string=run_number_string, inst_settings=self._inst_settings, is_vanadium_run=self._is_vanadium)
 
-        return run_details
+        return self._run_details_cached_obj[run_number_string_key]
 
     def _spline_vanadium_ws(self, focused_vanadium_spectra, instrument_version=''):
         masking_file_name = self._inst_settings.masking_file_name
diff --git a/scripts/Diffraction/isis_powder/polaris_routines/Examples/polaris_calibration.yaml b/scripts/Diffraction/isis_powder/polaris_routines/Examples/polaris_cycle_map_example.yaml
similarity index 100%
rename from scripts/Diffraction/isis_powder/polaris_routines/Examples/polaris_calibration.yaml
rename to scripts/Diffraction/isis_powder/polaris_routines/Examples/polaris_cycle_map_example.yaml
diff --git a/scripts/Diffraction/isis_powder/polaris_routines/polaris_advanced_config.py b/scripts/Diffraction/isis_powder/polaris_routines/polaris_advanced_config.py
index a083c3580632377bbfd64b1a677c11af91031183..3d30c280b7e84d0dc43ae78b109ebfa2d90e702a 100644
--- a/scripts/Diffraction/isis_powder/polaris_routines/polaris_advanced_config.py
+++ b/scripts/Diffraction/isis_powder/polaris_routines/polaris_advanced_config.py
@@ -30,6 +30,16 @@ focused_cropping_values = [
     (1500, 19900),  # Bank 5
     ]
 
+focused_bin_widths = [
+    # Note you want these to be negative for logarithmic (dt / t) binning
+    # else the output file will be larger than 1GB
+    -0.0050,  # Bank 1
+    -0.0010,  # Bank 2
+    -0.0010,  # Bank 3
+    -0.0010,  # Bank 4
+    -0.0005,  # Bank 5
+]
+
 vanadium_cropping_values = [
     (800, 19995),  # Bank 1
     (800, 19995),  # Bank 2
@@ -67,5 +77,6 @@ variables = {
     "file_names_dict": file_names,
     "script_params": script_params,
     "focused_cropping_values": focused_cropping_values,
-    "vanadium_cropping_values": vanadium_cropping_values
+    "vanadium_cropping_values": vanadium_cropping_values,
+    "focused_bin_widths": focused_bin_widths,
 }
diff --git a/scripts/Diffraction/isis_powder/polaris_routines/polaris_algs.py b/scripts/Diffraction/isis_powder/polaris_routines/polaris_algs.py
index 2bacd4e670a9112c786c232b35474279b05551fb..6ff3b84fbbf5717160eb2c7c1081c1c922ffc558 100644
--- a/scripts/Diffraction/isis_powder/polaris_routines/polaris_algs.py
+++ b/scripts/Diffraction/isis_powder/polaris_routines/polaris_algs.py
@@ -9,7 +9,7 @@ from isis_powder.polaris_routines import polaris_advanced_config
 
 
 def calculate_absorb_corrections(ws_to_correct, multiple_scattering):
-    mantid.MaskDetectors(ws_to_correct, SpectraList=list(range(0, 55)))
+    mantid.MaskDetectors(ws_to_correct, SpectraList=list(range(1, 55)))
 
     absorb_dict = polaris_advanced_config.absorption_correction_params
     ws_to_correct = absorb_corrections.run_cylinder_absorb_corrections(
diff --git a/scripts/Diffraction/isis_powder/polaris_routines/polaris_param_mapping.py b/scripts/Diffraction/isis_powder/polaris_routines/polaris_param_mapping.py
index 50d351e942f2b308881256c4eb48e43907d5663c..829a95b2885a72a895bc51994040e6f712a6b188 100644
--- a/scripts/Diffraction/isis_powder/polaris_routines/polaris_param_mapping.py
+++ b/scripts/Diffraction/isis_powder/polaris_routines/polaris_param_mapping.py
@@ -12,14 +12,18 @@ attr_mapping = \
      ParamMapEntry(ext_name="config_file",              int_name="config_file"),
      ParamMapEntry(ext_name="do_absorb_corrections",    int_name="do_absorb_corrections"),
      ParamMapEntry(ext_name="do_van_normalisation",     int_name="do_van_normalisation"),
+     ParamMapEntry(ext_name="file_ext",                 int_name="file_extension", optional=True),
+     ParamMapEntry(ext_name="first_cycle_run_no",       int_name="run_in_range"),
      ParamMapEntry(ext_name="focused_cropping_values",  int_name="focused_cropping_values"),
+     ParamMapEntry(ext_name="focused_bin_widths",       int_name="focused_bin_widths"),
      ParamMapEntry(ext_name="grouping_file_name",       int_name="grouping_file_name"),
      ParamMapEntry(ext_name="input_mode",               int_name="input_mode", enum_class=INPUT_BATCHING),
      ParamMapEntry(ext_name="masking_file_name",        int_name="masking_file_name"),
      ParamMapEntry(ext_name="multiple_scattering",      int_name="multiple_scattering"),
      ParamMapEntry(ext_name="raw_data_cropping_values", int_name="raw_data_crop_values"),
-     ParamMapEntry(ext_name="run_in_range",             int_name="run_in_range"),
      ParamMapEntry(ext_name="run_number",               int_name="run_number"),
+     ParamMapEntry(ext_name="sample_empty",             int_name="sample_empty",   optional=True),
+     ParamMapEntry(ext_name="sample_empty_scale",       int_name="sample_empty_scale"),
      ParamMapEntry(ext_name="spline_coefficient",       int_name="spline_coeff"),
      ParamMapEntry(ext_name="output_directory",         int_name="output_dir"),
      ParamMapEntry(ext_name="user_name",                int_name="user_name"),
diff --git a/scripts/Diffraction/isis_powder/routines/common.py b/scripts/Diffraction/isis_powder/routines/common.py
index 87295cbe95747469b75b54f59ade1b5a3c4019c0..b5743646a58d32f065c5811a2c4513d58707baab 100644
--- a/scripts/Diffraction/isis_powder/routines/common.py
+++ b/scripts/Diffraction/isis_powder/routines/common.py
@@ -244,7 +244,8 @@ def load_current_normalised_ws_list(run_number_string, instrument, input_batchin
         input_batching = instrument._get_input_batching_mode()
 
     run_information = instrument._get_run_details(run_number_string=run_number_string)
-    raw_ws_list = _load_raw_files(run_number_string=run_number_string, instrument=instrument)
+    file_ext = run_information.file_extension
+    raw_ws_list = _load_raw_files(run_number_string=run_number_string, instrument=instrument, file_ext=file_ext)
 
     if input_batching == INPUT_BATCHING.Summed and len(raw_ws_list) > 1:
         summed_ws = _sum_ws_range(ws_list=raw_ws_list)
@@ -257,6 +258,64 @@ def load_current_normalised_ws_list(run_number_string, instrument, input_batchin
     return normalised_ws_list
 
 
+def rebin_workspace(workspace, new_bin_width, start_x=None, end_x=None):
+    """
+    Rebins the specified workspace with the specified new bin width. The first
+    and final bin boundaries of the histogram can optionally be set as well.
+    If the bin boundaries are not set they are preserved from the original workspace.
+    :param workspace: The workspace to rebin
+    :param new_bin_width: The new bin width to use across the workspace
+    :param start_x: (Optional) The first x bin to crop to
+    :param end_x: (Optional) The final x bin to crop to
+    :return: The rebinned workspace
+    """
+
+    # Find the starting and ending bin boundaries if they were not set
+    if start_x is None:
+        start_x = workspace.readX(0)[0]
+    if end_x is None:
+        end_x = workspace.readX(0)[-1]
+
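+    # Build the 'start, width, end' Params string understood by the Rebin algorithm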
+    rebin_string = str(start_x) + ',' + str(new_bin_width) + ',' + str(end_x)
+    workspace = mantid.Rebin(InputWorkspace=workspace, OutputWorkspace=workspace, Params=rebin_string)
+    return workspace
+
+
+def rebin_workspace_list(workspace_list, bin_width_list, start_x_list=None, end_x_list=None):
+    """
+    Rebins a list of workspaces with the specified bin widths in the list provided.
+    The number of bin widths and workspaces in the list must match. Additionally,
+    if the optional start_x_list or end_x_list parameters are provided they must
+    have the same length as well.
+    :param workspace_list: The list of workspaces to rebin in place
+    :param bin_width_list: The list of new bin widths to apply to each workspace
+    :param start_x_list: The list of starting x boundaries to rebin to
+    :param end_x_list: The list of ending x boundaries to rebin to
+    :return: The list of rebinned workspaces
+    """
+    if not isinstance(workspace_list, list) or not isinstance(bin_width_list, list):
+        raise RuntimeError("One of the types passed to rebin_workspace_list was not a list")
+
+    ws_list_len = len(workspace_list)
+    if ws_list_len != len(bin_width_list):
+        raise ValueError("The number of bin widths found to rebin to does not match the number of banks")
+    if start_x_list and len(start_x_list) != ws_list_len:
+        raise ValueError("The number of starting bin values does not match the number of banks")
+    if end_x_list and len(end_x_list) != ws_list_len:
+        raise ValueError("The number of ending bin values does not match the number of banks")
+
+    # Create a list of None types of equal length to make using zip iterator easy
+    start_x_list = [None] * ws_list_len if start_x_list is None else start_x_list
+    end_x_list = [None] * ws_list_len if end_x_list is None else end_x_list
+
+    output_list = []
+    for ws, bin_width, start_x, end_x in zip(workspace_list, bin_width_list, start_x_list, end_x_list):
+        output_list.append(rebin_workspace(workspace=ws, new_bin_width=bin_width,
+                                           start_x=start_x, end_x=end_x))
+
+    return output_list
+
+
 def remove_intermediate_workspace(workspaces):
     """
     Removes the specified workspace(s) from the ADS. Can accept lists of workspaces. It
@@ -319,7 +378,7 @@ def spline_workspaces(focused_vanadium_spectra, num_splines):
     return splined_ws_list
 
 
-def subtract_summed_runs(ws_to_correct, empty_sample_ws_string, instrument):
+def subtract_summed_runs(ws_to_correct, empty_sample_ws_string, instrument, scale_factor=None):
     """
     Loads the list of empty runs specified by the empty_sample_ws_string and subtracts
     them from the workspace specified. Returns the subtracted workspace.
@@ -328,11 +387,18 @@ def subtract_summed_runs(ws_to_correct, empty_sample_ws_string, instrument):
     :param instrument: The instrument object these runs belong to
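+    :param scale_factor: (Optional) The scaling factor to apply to the empty runs before subtracting them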
     :return: The workspace with the empty runs subtracted
     """
-    if empty_sample_ws_string:
-        empty_sample = load_current_normalised_ws_list(run_number_string=empty_sample_ws_string, instrument=instrument,
-                                                       input_batching=INPUT_BATCHING.Summed)
-        mantid.Minus(LHSWorkspace=ws_to_correct, RHSWorkspace=empty_sample[0], OutputWorkspace=ws_to_correct)
-        remove_intermediate_workspace(empty_sample)
+    # If no empty runs were specified skip this step and return the input workspace unchanged
+    if empty_sample_ws_string is None:
+        return ws_to_correct
+
+    empty_sample = load_current_normalised_ws_list(run_number_string=empty_sample_ws_string, instrument=instrument,
+                                                   input_batching=INPUT_BATCHING.Summed)
+    empty_sample = empty_sample[0]
+    if scale_factor:
+        empty_sample = mantid.Scale(InputWorkspace=empty_sample, OutputWorkspace=empty_sample, Factor=scale_factor,
+                                    Operation="Multiply")
+    mantid.Minus(LHSWorkspace=ws_to_correct, RHSWorkspace=empty_sample, OutputWorkspace=ws_to_correct)
+    remove_intermediate_workspace(empty_sample)
 
     return ws_to_correct
 
@@ -386,7 +452,7 @@ def _check_load_range(list_of_runs_to_load):
                          " Found " + str(len(list_of_runs_to_load)) + " Aborting.")
 
 
-def _load_raw_files(run_number_string, instrument):
+def _load_raw_files(run_number_string, instrument, file_ext=None):
     """
     Uses the run number string to generate a list of run numbers to load in
     :param run_number_string: The run number string to generate
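+    :param file_ext: (Optional) The file extension to use when loading the runs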
@@ -394,11 +460,11 @@ def _load_raw_files(run_number_string, instrument):
     :return: A list of loaded workspaces
     """
     run_number_list = generate_run_numbers(run_number_string=run_number_string)
-    load_raw_ws = _load_list_of_files(run_number_list, instrument)
+    load_raw_ws = _load_list_of_files(run_number_list, instrument, file_ext=file_ext)
     return load_raw_ws
 
 
-def _load_list_of_files(run_numbers_list, instrument):
+def _load_list_of_files(run_numbers_list, instrument, file_ext=None):
     """
     Loads files based on the list passed to it. If the list is
     greater than the maximum range it will raise an exception
@@ -412,6 +478,7 @@ def _load_list_of_files(run_numbers_list, instrument):
 
     for run_number in run_numbers_list:
         file_name = instrument._generate_input_file_name(run_number=run_number)
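+        # Append the user specified file extension (if any) so Load resolves the requested file type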
+        file_name = file_name + str(file_ext) if file_ext else file_name
         read_ws = mantid.Load(Filename=file_name)
         read_ws_list.append(mantid.RenameWorkspace(InputWorkspace=read_ws, OutputWorkspace=file_name))
 
diff --git a/scripts/Diffraction/isis_powder/routines/common_output.py b/scripts/Diffraction/isis_powder/routines/common_output.py
index 8c94c293192d6775d13d856b765da4d5f2353b69..9a47c6360804a66289edb49f2d802d84a48267c0 100644
--- a/scripts/Diffraction/isis_powder/routines/common_output.py
+++ b/scripts/Diffraction/isis_powder/routines/common_output.py
@@ -17,18 +17,19 @@ def split_into_tof_d_spacing_groups(run_details, processed_spectra):
     d_spacing_output = []
     tof_output = []
     run_number = str(run_details.output_run_string)
-    for name_index, ws in enumerate(processed_spectra):
-        d_spacing_out_name = run_number + "-ResultD-" + str(name_index + 1)
-        tof_out_name = run_number + "-ResultTOF-" + str(name_index + 1)
+    ext = run_details.file_extension if run_details.file_extension else ""
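+    # Include the file extension (if one was set) in the output names so results from
+    # runs loaded with different extensions do not overwrite each other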
+    for name_index, ws in enumerate(processed_spectra, start=1):
+        d_spacing_out_name = run_number + ext + "-ResultD-" + str(name_index)
+        tof_out_name = run_number + ext + "-ResultTOF-" + str(name_index)
 
         d_spacing_output.append(mantid.ConvertUnits(InputWorkspace=ws, OutputWorkspace=d_spacing_out_name,
                                                     Target="dSpacing"))
         tof_output.append(mantid.ConvertUnits(InputWorkspace=ws, OutputWorkspace=tof_out_name, Target="TOF"))
 
     # Group the outputs
-    d_spacing_group_name = run_number + "-Results-D-Grp"
+    d_spacing_group_name = run_number + ext + "-Results-D-Grp"
     d_spacing_group = mantid.GroupWorkspaces(InputWorkspaces=d_spacing_output, OutputWorkspace=d_spacing_group_name)
-    tof_group_name = run_number + "-Results-TOF-Grp"
+    tof_group_name = run_number + ext + "-Results-TOF-Grp"
     tof_group = mantid.GroupWorkspaces(InputWorkspaces=tof_output, OutputWorkspace=tof_group_name)
 
     return d_spacing_group, tof_group
diff --git a/scripts/Diffraction/isis_powder/routines/focus.py b/scripts/Diffraction/isis_powder/routines/focus.py
index 0175b1848bcb294459ffff694b61f31a909acad5..380acf77c31cd012cba07f38afe0f249e83418ef 100644
--- a/scripts/Diffraction/isis_powder/routines/focus.py
+++ b/scripts/Diffraction/isis_powder/routines/focus.py
@@ -23,12 +23,14 @@ def _focus_one_ws(ws, run_number, instrument, perform_vanadium_norm):
     if perform_vanadium_norm:
         _test_splined_vanadium_exists(instrument, run_details)
 
-    # Subtract empty beam runs
+    # Subtract empty instrument runs
     input_workspace = common.subtract_summed_runs(ws_to_correct=ws, instrument=instrument,
                                                   empty_sample_ws_string=run_details.empty_runs)
     # Subtract a sample empty if specified
-    input_workspace = common.subtract_summed_runs(ws_to_correct=input_workspace, instrument=instrument,
-                                                  empty_sample_ws_string=run_details.sample_empty)
+    if run_details.sample_empty:
+        input_workspace = common.subtract_summed_runs(ws_to_correct=input_workspace, instrument=instrument,
+                                                      empty_sample_ws_string=run_details.sample_empty,
+                                                      scale_factor=instrument._inst_settings.sample_empty_scale)
 
     # Crop to largest acceptable TOF range
     input_workspace = instrument._crop_raw_to_expected_tof_range(ws_to_crop=input_workspace)
@@ -44,10 +46,16 @@ def _focus_one_ws(ws, run_number, instrument, perform_vanadium_norm):
                                                      input_workspace=focused_ws,
                                                      perform_vanadium_norm=perform_vanadium_norm)
 
-    cropped_spectra = instrument._crop_banks_to_user_tof(calibrated_spectra)
+    output_spectra = instrument._crop_banks_to_user_tof(calibrated_spectra)
+
+    bin_widths = instrument._get_instrument_bin_widths()
+    if bin_widths:
+        # Reduce the bin width if required on this instrument
+        output_spectra = common.rebin_workspace_list(workspace_list=output_spectra,
+                                                     bin_width_list=bin_widths)
 
     # Output
-    d_spacing_group, tof_group = instrument._output_focused_ws(cropped_spectra, run_details=run_details)
+    d_spacing_group, tof_group = instrument._output_focused_ws(output_spectra, run_details=run_details)
 
     common.keep_single_ws_unit(d_spacing_group=d_spacing_group, tof_group=tof_group,
                                unit_to_keep=instrument._get_unit_to_keep())
@@ -56,7 +64,7 @@ def _focus_one_ws(ws, run_number, instrument, perform_vanadium_norm):
     common.remove_intermediate_workspace(input_workspace)
     common.remove_intermediate_workspace(aligned_ws)
     common.remove_intermediate_workspace(focused_ws)
-    common.remove_intermediate_workspace(cropped_spectra)
+    common.remove_intermediate_workspace(output_spectra)
 
     return d_spacing_group
 
diff --git a/scripts/Diffraction/isis_powder/routines/instrument_settings.py b/scripts/Diffraction/isis_powder/routines/instrument_settings.py
index 2328127017ae6283aeb14f9f7d248fee19a43056..ef0902a8459a6591978c87b8798a41c01626e09e 100644
--- a/scripts/Diffraction/isis_powder/routines/instrument_settings.py
+++ b/scripts/Diffraction/isis_powder/routines/instrument_settings.py
@@ -1,31 +1,41 @@
 from __future__ import (absolute_import, division, print_function)
 
 from six import iteritems
+from isis_powder.routines import yaml_parser
 import warnings
 
 
 # Have to patch warnings at runtime to not print the source code. This is even advertised as a 'feature' of
 # the warnings library in the documentation: https://docs.python.org/3/library/warnings.html#warnings.showwarning
-def warning_no_source(msg, *_):
+def _warning_no_source(msg, *_, **__):
     return str(msg) + '\n'
 
-warnings.formatwarning = warning_no_source
+warnings.formatwarning = _warning_no_source
 warnings.simplefilter('always', UserWarning)
 
 
 class InstrumentSettings(object):
     # Holds instance variables updated at runtime
-    def __init__(self, param_map, adv_conf_dict=None, basic_conf_dict=None, kwargs=None):
+    def __init__(self, param_map, adv_conf_dict=None, kwargs=None):
         self._param_map = param_map
         self._adv_config_dict = adv_conf_dict
-        self._basic_conf_dict = basic_conf_dict
         self._kwargs = kwargs
+        self._basic_conf_dict = None
+
+        # Check if we have kwargs otherwise this work cannot be completed (e.g. using automated testing)
+        if kwargs:
+            config_file_path = kwargs.get("config_file", None)
+            if not config_file_path:
+                warnings.warn("No config file was specified. If one was meant to be used the path to a YAML config file"
+                              " is set with the 'config_file' parameter.")
+            # Always do this so we have a known state of the internal variable
+            self._basic_conf_dict = yaml_parser.open_yaml_file_as_dictionary(config_file_path)
 
         # We parse in the order advanced config, basic config (if specified), kwargs.
         # This means that users can use the advanced config as a safe set of defaults, with their own preferences as
         # the next layer which can override defaults and finally script arguments as their final override.
         self._parse_attributes(dict_to_parse=adv_conf_dict)
-        self._parse_attributes(dict_to_parse=basic_conf_dict)
+        self._parse_attributes(dict_to_parse=self._basic_conf_dict)
         self._parse_attributes(dict_to_parse=kwargs)
 
     # __getattr__ is only called if the attribute was not set so we already know
@@ -135,11 +145,10 @@ def _check_value_is_in_enum(val, enum):
     :return: The correctly cased val. Otherwise raises a value error.
     """
     seen_val_in_enum = False
-    enum_known_vals = []
+    enum_known_vals = _get_enum_values(enum_cls=enum)
     lower_string_val = str(val).lower()
 
-    known_values = _get_enum_values(enum_cls=enum)
-    for enum_val in known_values:
+    for enum_val in enum_known_vals:
 
         if lower_string_val == enum_val.lower():
             # Get the correctly capitalised value so we no longer have to call lower
diff --git a/scripts/Diffraction/isis_powder/routines/run_details.py b/scripts/Diffraction/isis_powder/routines/run_details.py
index 423ce13a6bfab2e24faa736731a6c10d5c5f62fd..a694713bc9ddcf31ee4f5a10d7807b745dcfaa7a 100644
--- a/scripts/Diffraction/isis_powder/routines/run_details.py
+++ b/scripts/Diffraction/isis_powder/routines/run_details.py
@@ -37,9 +37,16 @@ def create_run_details_object(run_number_string, inst_settings, is_vanadium_run,
         run_number = vanadium_run_string
         output_run_string = vanadium_run_string
     else:
+        # Otherwise set it to the user input
         output_run_string = run_number_string
 
-    # Sample empty if there is one
+    # Get the file extension if set
+    file_extension = getattr(inst_settings, "file_extension")
+    if file_extension:
+        # Prefix a dot if the user has omitted it
+        file_extension = file_extension if file_extension.startswith('.') else '.' + file_extension
+
+    # Sample empty if there is one as this is instrument specific
     sample_empty = getattr(inst_settings, "sample_empty", None)
 
     # Generate the paths
@@ -49,11 +56,11 @@ def create_run_details_object(run_number_string, inst_settings, is_vanadium_run,
     splined_van_path = os.path.join(calibration_dir, label, results_dict["splined_van_name"])
     van_absorb_path = os.path.join(calibration_dir, van_abs_file_name) if van_abs_file_name else None
 
-    return _RunDetails(empty_run_number=results_dict["empty_runs"], run_number=run_number,
-                       output_run_string=output_run_string, label=label, offset_file_path=offset_file_path,
-                       grouping_file_path=grouping_file_path, splined_vanadium_path=splined_van_path,
-                       vanadium_run_number=vanadium_run_string, sample_empty=sample_empty,
-                       vanadium_abs_path=van_absorb_path)
+    return _RunDetails(empty_run_number=results_dict["empty_runs"], file_extension=file_extension,
+                       run_number=run_number, output_run_string=output_run_string, label=label,
+                       offset_file_path=offset_file_path, grouping_file_path=grouping_file_path,
+                       splined_vanadium_path=splined_van_path, vanadium_run_number=vanadium_run_string,
+                       sample_empty=sample_empty, vanadium_abs_path=van_absorb_path)
 
 
 def _get_customisable_attributes(cal_dict, inst_settings, empty_run_call, grouping_name_call, vanadium_run_call,
@@ -158,7 +165,7 @@ class _RunDetails(object):
     This class holds the full file paths associated with each run and various other useful attributes
     """
 
-    def __init__(self, empty_run_number, run_number, output_run_string, label,
+    def __init__(self, empty_run_number, file_extension, run_number, output_run_string, label,
                  offset_file_path, grouping_file_path, splined_vanadium_path, vanadium_run_number,
                  sample_empty, vanadium_abs_path):
 
@@ -176,5 +183,6 @@ class _RunDetails(object):
         self.vanadium_run_numbers = vanadium_run_number
 
         # Optional
+        self.file_extension = str(file_extension) if file_extension else None
         self.sample_empty = sample_empty
         self.vanadium_absorption_path = vanadium_abs_path
diff --git a/scripts/Diffraction/isis_powder/routines/yaml_parser.py b/scripts/Diffraction/isis_powder/routines/yaml_parser.py
index 4a8c4201a18ad889fc6879b24d0afa4c60f1c911..50016d70636221b9066801b2879dacdb634d7289 100644
--- a/scripts/Diffraction/isis_powder/routines/yaml_parser.py
+++ b/scripts/Diffraction/isis_powder/routines/yaml_parser.py
@@ -15,7 +15,8 @@ def get_run_dictionary(run_number_string, file_path):
     run_key = _find_dictionary_key(dict_to_search=config_file, run_number=run_number_string)
 
     if not run_key:
-        raise ValueError("Run number " + str(run_number_string) + " not recognised in calibration mapping")
+        raise ValueError("Run number " + str(run_number_string) +
+                         " not recognised in cycle mapping file at " + str(file_path))
 
     return config_file[run_key]
 
diff --git a/scripts/Inelastic/CrystalField/fitting.py b/scripts/Inelastic/CrystalField/fitting.py
index 99d37f2799514a017c6fdac467ddbbcd71b11e60..14ea524248f6cbffa1a2b328ac1cd6e279ad396a 100644
--- a/scripts/Inelastic/CrystalField/fitting.py
+++ b/scripts/Inelastic/CrystalField/fitting.py
@@ -113,50 +113,53 @@ class CrystalField(object):
                                           Note that physical properties datasets should follow inelastic spectra
                                           See the Crystal Field Python Interface help page for more details.
         """
-        from .function import PeaksFunction
+
+        self._background = None
+
+        if 'Temperature' in kwargs:
+            temperature = kwargs['Temperature']
+            del kwargs['Temperature']
+        else:
+            temperature = -1
+
+        # Create self.function attribute
+        self._makeFunction(Ion, Symmetry, temperature)
+        self.Temperature = temperature
         self.Ion = Ion
-        self._symmetry = Symmetry
-        self._toleranceEnergy = 1e-10
-        self._toleranceIntensity = 1e-1
-        self._fieldParameters = {}
-        self._fieldTies = {}
-        self._fieldConstraints = []
-        self._temperature = None
-        self._FWHM = None
-        self._intensityScaling = None
+        self.Symmetry = Symmetry
         self._resolutionModel = None
-        self._fwhmVariation = None
-        self._fixAllPeaks = False
         self._physprop = None
 
+        free_parameters = []
         for key in kwargs:
             if key == 'ToleranceEnergy':
-                self._toleranceEnergy = kwargs[key]
+                self.ToleranceEnergy = kwargs[key]
             elif key == 'ToleranceIntensity':
-                self._toleranceIntensity = kwargs[key]
+                self.ToleranceIntensity = kwargs[key]
             elif key == 'IntensityScaling':
-                self._intensityScaling = kwargs[key]
+                self.IntensityScaling = kwargs[key]
             elif key == 'FWHM':
-                self._FWHM = kwargs[key]
+                self.FWHM = kwargs[key]
             elif key == 'ResolutionModel':
                 self.ResolutionModel = kwargs[key]
-            elif key == 'Temperature':
-                self._temperature = kwargs[key]
+            elif key == 'NPeaks':
+                self.NPeaks = kwargs[key]
             elif key == 'FWHMVariation':
-                self._fwhmVariation = kwargs[key]
+                self.FWHMVariation = kwargs[key]
             elif key == 'FixAllPeaks':
-                self._fixAllPeaks = kwargs[key]
+                self.FixAllPeaks = kwargs[key]
             elif key == 'PhysicalProperty':
-                self._physprop = kwargs[key]
+                self.PhysicalProperty = kwargs[key]
             else:
                 # Crystal field parameters
-                self._fieldParameters[key] = kwargs[key]
+                self.function.setParameter(key, kwargs[key])
+                free_parameters.append(key)
 
-        if isinstance(self._temperature, list) or isinstance(self._temperature, np.ndarray):
-            self.peaks = [PeaksFunction(firstIndex=1) for _ in self._temperature]
-        else:
-            self.peaks = PeaksFunction()
-        self.background = None
+        for param in CrystalField.field_parameter_names:
+            if param not in free_parameters:
+                self.function.fixParameter(param)
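+        # Note: field parameters passed explicitly (e.g. B20=0.37) remain free for
+        # fitting; every other field parameter is fixed at its current (default) value.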
+
+        self._setPeaks()
 
         # Eigensystem
         self._dirty_eigensystem = True
@@ -173,67 +176,70 @@ class CrystalField(object):
         self._spectra = {}
         self._plot_window = {}
 
-        self._setDefaultTies()
+        # self._setDefaultTies()
         self.chi2 = None
 
+    def _makeFunction(self, ion, symmetry, temperature):
+        from mantid.simpleapi import FunctionFactory
+        if temperature is not None and islistlike(temperature) and len(temperature) > 1:
+            self.function = FunctionFactory.createFunction('CrystalFieldMultiSpectrum')
+            self._isMultiSpectrum = True
+            tempStr = 'Temperatures'
+        else:
+            self.function = FunctionFactory.createFunction('CrystalFieldSpectrum')
+            self._isMultiSpectrum = False
+            tempStr = 'Temperature'
+        self.function.setAttributeValue('Ion', ion)
+        self.function.setAttributeValue('Symmetry', symmetry)
+        if temperature:
+            temperature = [float(val) for val in temperature] if islistlike(temperature) else float(temperature)
+            self.function.setAttributeValue(tempStr, temperature)
+
+    def _remakeFunction(self, temperature):
+        """Redefines the internal function, e.g. when `Temperature` (number of datasets) change"""
+        fieldParams = self._getFieldParameters()
+        self._makeFunction(self.Ion, self.Symmetry, temperature)
+        for item in fieldParams.items():
+            self.function.setParameter(item[0], item[1])
+        for param in CrystalField.field_parameter_names:
+            if param not in fieldParams.keys():
+                self.function.fixParameter(param)
+
+    def _setPeaks(self):
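+        """Create the PeaksFunction helper(s) used to access peak parameters.
+
+        In the multi-spectrum case the peaks of spectrum i are addressed through the
+        'f<i>.' prefix and start at member index 1 (member 0 of each spectrum is reserved
+        for its background); in the single-spectrum case peaks are indexed from 0."""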
+        from .function import PeaksFunction
+        if self._isMultiSpectrum:
+            self._peaks = []
+            for i in range(self.NumberOfSpectra):
+                self._peaks.append(PeaksFunction(self.crystalFieldFunction, 'f%s.' % i, 1))
+        else:
+            self._peaks = PeaksFunction(self.crystalFieldFunction, '', 0)
+
+    @property
+    def crystalFieldFunction(self):
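+        """Return the function holding the crystal field parameters. When a background is
+        set on a single-spectrum model the top-level function is a composite whose second
+        member is the crystal field part."""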
+        if not self._isMultiSpectrum and self.background is not None:
+            return self.function[1]
+        else:
+            return self.function
+
     def makePeaksFunction(self, i):
         """Form a definition string for the CrystalFieldPeaks function
         @param i: Index of a spectrum.
         """
         temperature = self._getTemperature(i)
-        out = 'name=CrystalFieldPeaks,Ion=%s,Symmetry=%s,Temperature=%s' % (self._ion, self._symmetry, temperature)
-        out += ',ToleranceEnergy=%s,ToleranceIntensity=%s' % (self._toleranceEnergy, self._toleranceIntensity)
-        out += ',%s' % ','.join(['%s=%s' % item for item in self._fieldParameters.items()])
+        out = 'name=CrystalFieldPeaks,Ion=%s,Symmetry=%s,Temperature=%s' % (self.Ion, self.Symmetry, temperature)
+        out += ',ToleranceEnergy=%s,ToleranceIntensity=%s' % (self.ToleranceEnergy, self.ToleranceIntensity)
+        out += ',%s' % ','.join(['%s=%s' % item for item in self._getFieldParameters().items()])
         return out
 
     def makeSpectrumFunction(self, i=0):
         """Form a definition string for the CrystalFieldSpectrum function
         @param i: Index of a spectrum.
         """
-        from .function import Background
-        temperature = self._getTemperature(i)
-        out = 'name=CrystalFieldSpectrum,Ion=%s,Symmetry=%s,Temperature=%s' % (self._ion, self._symmetry, temperature)
-        out += ',ToleranceEnergy=%s,ToleranceIntensity=%s' % (self._toleranceEnergy, self._toleranceIntensity)
-        out += ',FixAllPeaks=%s' % (1 if self._fixAllPeaks else 0)
-        out += ',PeakShape=%s' % self.getPeak(i).name
-        if self._intensityScaling is not None:
-            out += ',IntensityScaling=%s' % self._getIntensityScaling(i)
-        if self._FWHM is not None:
-            out += ',FWHM=%s' % self._getFWHM(i)
-        if len(self._fieldParameters) > 0:
-            out += ',%s' % ','.join(['%s=%s' % item for item in self._fieldParameters.items()])
-        if self._resolutionModel is not None:
-            if self._resolutionModel.multi:
-                model = self._resolutionModel.model[i]
-            else:
-                model = self._resolutionModel.model
-            out += ',FWHMX=%s,FWHMY=%s' % tuple(map(tuple, model))
-            if self._fwhmVariation is not None:
-                out += ',FWHMVariation=%s' % self._fwhmVariation
-
-        peaks = self.getPeak(i)
-        params = peaks.paramString('', 0)
-        if len(params) > 0:
-            out += ',%s' % params
-        ties = peaks.tiesString()
-        if len(ties) > 0:
-            out += ',%s' % ties
-        constraints = peaks.constraintsString()
-        if len(constraints) > 0:
-            out += ',%s' % constraints
-        if self.background is not None:
-            if isinstance(self.background, Background):
-                bgOut = self.background.toString()
-            else:
-                bgOut = self.background[i].toString()
-            out = '%s;%s' % (bgOut, out)
-        ties = self.getFieldTies()
-        if len(ties) > 0:
-            out += ',ties=(%s)' % ties
-        constraints = self.getFieldConstraints()
-        if len(constraints) > 0:
-            out += ',constraints=(%s)' % constraints
-        return out
+        if not self._isMultiSpectrum:
+            return str(self.function)
+        else:
+            funs = self.function.createEquivalentFunctions()
+            return str(funs[i])
 
     def makePhysicalPropertiesFunction(self, i=0):
         """Form a definition string for one of the crystal field physical properties functions
@@ -249,132 +255,21 @@ class CrystalField(object):
                 out = ppobj.toString()
             else:
                 return ''
-        out += ',Ion=%s,Symmetry=%s' % (self._ion, self._symmetry)
-        if len(self._fieldParameters) > 0:
-            out += ',%s' % ','.join(['%s=%s' % item for item in self._fieldParameters.items()])
-        ties = self.getFieldTies()
+        out += ',Ion=%s,Symmetry=%s' % (self.Ion, self.Symmetry)
+        fieldParams = self._getFieldParameters()
+        if len(fieldParams) > 0:
+            out += ',%s' % ','.join(['%s=%s' % item for item in fieldParams.items()])
+        ties = self._getFieldTies()
         if len(ties) > 0:
             out += ',ties=(%s)' % ties
-        constraints = self.getFieldConstraints()
+        constraints = self._getFieldConstraints()
         if len(constraints) > 0:
             out += ',constraints=(%s)' % constraints
         return out
 
-    def _makeMultiAttributes(self):
-        """
-        Make the main attribute part of the function string for makeMultiSpectrumFunction()
-        """
-        # Handles physical properties (PP). self._temperature applies only for INS datasets. But the
-        # C++ CrystalFieldMultiSpectrum uses it to count number of datasets, so we need to set it here
-        # as a concatenation of the INS (self._temperature and self._FWHM) and PP (self._physprop)
-        if self._temperature is None:
-            if self._physprop is None:
-                errmsg = 'Cannot run fit: No temperature (INS spectrum) or physical properties defined.'
-                raise RuntimeError(errmsg)
-            physprop = []
-            temperature = []
-            FWHM = []
-        else:
-            physprop = (len(self._temperature) if islistlike(self._temperature) else 1) * [None]
-            temperature = self._temperature if islistlike(self._temperature) else [self._temperature]
-            FWHM = self._FWHM if islistlike(self._FWHM) else [self._FWHM]
-        if self._physprop is not None:
-            for pp in (self._physprop if islistlike(self._physprop) else [self._physprop]):
-                temperature.append(pp.Temperature if (pp.Temperature is not None) else 0.)
-                FWHM.append(0.)
-                physprop.append(pp)
-            ppid = [0 if pp is None else pp.TypeID for pp in physprop]
-            ppenv = [pp.envString(i) for i, pp in enumerate(physprop) if pp is not None]
-            ppenv = filter(None, ppenv)
-        out = ',ToleranceEnergy=%s,ToleranceIntensity=%s' % (self._toleranceEnergy, self._toleranceIntensity)
-        out += ',PeakShape=%s' % self.getPeak().name
-        out += ',FixAllPeaks=%s' % (1 if self._fixAllPeaks else 0)
-        if self.background is not None:
-            out += ',Background=%s' % self.background[0].nameString()
-        out += ',Temperatures=(%s)' % ','.join(map(str, temperature))
-        if self._physprop is not None:
-            out += ',PhysicalProperties=(%s)' % ','.join(map(str, ppid))
-            out += ',%s' % ','.join(map(str, ppenv))
-        if self._FWHM is not None:
-            out += ',FWHMs=(%s)' % ','.join(map(str, FWHM))
-        if self._intensityScaling is not None:
-            for i in range(len(self._intensityScaling)):
-                out += ',IntensityScaling%s=%s' % (i, self._intensityScaling[i])
-        return out
-
-    def _makeMultiResolutionModel(self):
-        """
-        Make the resolution model part of the function string for makeMultiSpectrumFunction()
-        """
-        out = ''
-        if self._resolutionModel is not None:
-            i = 0
-            for model in self._resolutionModel.model:
-                out += ',FWHMX{0}={1},FWHMY{0}={2}'.format(i, tuple(model[0]), tuple(model[1]))
-                i += 1
-            if self._fwhmVariation is not None:
-                out += ',FWHMVariation=%s' % self._fwhmVariation
-        return out
-
-    def _makeMultiPeaks(self):
-        """
-        Make the peaks part of the function string for makeMultiSpectrumFunction()
-        """
-        out = ''
-        i = 0
-        for peaks in (self.peaks if islistlike(self.peaks) else [self.peaks]):
-            parOut = peaks.paramString('f%s.' % i, 1)
-            if len(parOut) > 0:
-                out += ',%s' % parOut
-            tiesOut = peaks.tiesString('f%s.' % i)
-            if len(tiesOut) > 0:
-                out += ',%s' % tiesOut
-            constraintsOut = peaks.constraintsString('f%s.' % i)
-            if len(constraintsOut) > 0:
-                out += ',%s' % constraintsOut
-            i += 1
-        return out
-
-    # pylint: disable=too-many-public-branches
     def makeMultiSpectrumFunction(self):
-        """Form a definition string for the CrystalFieldMultiSpectrum function"""
-        out = 'name=CrystalFieldMultiSpectrum,Ion=%s,Symmetry=%s' % (self._ion, self._symmetry)
-        out += self._makeMultiAttributes()
-        out += ',%s' % ','.join(['%s=%s' % item for item in self._fieldParameters.items()])
-
-        tieList = []
-        constraintsList = []
-        if self.background is not None:
-            i = 0
-            for background in self.background:
-                prefix = 'f%s.f0.' % i
-                bgOut = background.paramString(prefix)
-                if len(bgOut) > 0:
-                    out += ',%s' % bgOut
-                tieOut = background.tiesString(prefix)
-                if len(tieOut) > 0:
-                    tieList.append(tieOut)
-                constraintsOut = background.constraintsString(prefix)
-                if len(constraintsOut) > 0:
-                    constraintsList.append(constraintsOut)
-                i += 1
-        if self._temperature is not None:
-            out += self._makeMultiResolutionModel()
-            out += self._makeMultiPeaks()
-
-        ties = self.getFieldTies()
-        if len(ties) > 0:
-            tieList.append(ties)
-        ties = ','.join(tieList)
-        if len(ties) > 0:
-            out += ',ties=(%s)' % ties
-        constraints = self.getFieldConstraints()
-        if len(constraints) > 0:
-            constraintsList.append(constraints)
-        constraints = ','.join(constraintsList)
-        if len(constraints) > 0:
-            out += ',constraints=(%s)' % constraints
-        return out
+        import re
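+        # Strip empty FWHMX<n>=()/FWHMY<n>=() attributes from the serialised function string.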
+        return re.sub(r'FWHM[XY]\d+=\(\),', '', str(self.function))
 
     @property
     def Ion(self):
@@ -384,7 +279,7 @@ class CrystalField(object):
         ...
         ion = cf.Ion
         """
-        return self._ion
+        return self.crystalFieldFunction.getAttributeValue('Ion')
 
     @Ion.setter
     def Ion(self, value):
@@ -395,19 +290,10 @@ class CrystalField(object):
         cf.Ion = 'Pr'
         """
         if value not in self.ion_nre_map.keys():
-            msg = 'Value %s is not allowed for attribute Ion.\nList of allowed values: %s' %\
+            msg = 'Value %s is not allowed for attribute Ion.\nList of allowed values: %s' % \
                   (value, ', '.join(list(self.ion_nre_map.keys())))
-            arbitraryJ = re.match('[SJsj]([0-9\.]+)', value)
-            if arbitraryJ and (float(arbitraryJ.group(1)) % 0.5) == 0:
-                value = arbitraryJ.group(0)
-                self._nre = int(-float(arbitraryJ.group(1)) * 2.)
-                if self._nre < -99:
-                    raise RuntimeError('J value ' + str(-self._nre / 2) + ' is too large.')
-            else:
-                raise RuntimeError(msg+', S<n>, J<n>')
-        else:
-            self._nre = self.ion_nre_map[value]
-        self._ion = value
+            raise RuntimeError(msg)
+        self.crystalFieldFunction.setAttributeValue('Ion', value)
         self._dirty_eigensystem = True
         self._dirty_peaks = True
         self._dirty_spectra = True
@@ -420,7 +306,7 @@ class CrystalField(object):
         ...
         symm = cf.Symmetry
         """
-        return self._symmetry
+        return self.crystalFieldFunction.getAttributeValue('Symmetry')
 
     @Symmetry.setter
     def Symmetry(self, value):
@@ -434,7 +320,7 @@ class CrystalField(object):
             msg = 'Value %s is not allowed for attribute Symmetry.\nList of allowed values: %s' % \
                   (value, ', '.join(self.allowed_symmetries))
             raise RuntimeError(msg)
-        self._symmetry = value
+        self.crystalFieldFunction.setAttributeValue('Symmetry', value)
         self._dirty_eigensystem = True
         self._dirty_peaks = True
         self._dirty_spectra = True
@@ -442,67 +328,119 @@ class CrystalField(object):
     @property
     def ToleranceEnergy(self):
         """Get energy tolerance"""
-        return self._toleranceEnergy
+        return self.crystalFieldFunction.getAttributeValue('ToleranceEnergy')
 
     @ToleranceEnergy.setter
     def ToleranceEnergy(self, value):
         """Set energy tolerance"""
-        self._toleranceEnergy = value
+        self.crystalFieldFunction.setAttributeValue('ToleranceEnergy', float(value))
         self._dirty_peaks = True
         self._dirty_spectra = True
 
     @property
     def ToleranceIntensity(self):
         """Get intensity tolerance"""
-        return self._toleranceIntensity
+        return self.crystalFieldFunction.getAttributeValue('ToleranceIntensity')
 
     @ToleranceIntensity.setter
     def ToleranceIntensity(self, value):
         """Set intensity tolerance"""
-        self._toleranceIntensity = value
+        self.crystalFieldFunction.setAttributeValue('ToleranceIntensity', float(value))
         self._dirty_peaks = True
         self._dirty_spectra = True
 
     @property
     def IntensityScaling(self):
-        return self._intensityScaling
+        if not self._isMultiSpectrum:
+            return self.crystalFieldFunction.getParameterValue('IntensityScaling')
+        iscaling = []
+        for i in range(self.NumberOfSpectra):
+            paramName = 'IntensityScaling%s' % i
+            iscaling.append(self.crystalFieldFunction.getParameterValue(paramName))
+        return iscaling
 
     @IntensityScaling.setter
     def IntensityScaling(self, value):
-        self._intensityScaling = value
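+        """Set the intensity scaling: a single float for a one-spectrum model, or a list
+        with one value per spectrum for a multi-spectrum model. Sketch:
+            cf.IntensityScaling = 2.0
+            cf.IntensityScaling = [2.0, 1.5]
+        """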
+        if not self._isMultiSpectrum:
+            if islistlike(value):
+                if len(value) == 1:
+                    value = value[0]
+                else:
+                    raise ValueError('IntensityScaling is expected to be a single floating point value')
+            self.crystalFieldFunction.setParameter('IntensityScaling', value)
+        else:
+            n = self.NumberOfSpectra
+            if not islistlike(value) or len(value) != n:
+                raise ValueError('IntensityScaling is expected to be a list of %s values' % n)
+            for i in range(n):
+                paramName = 'IntensityScaling%s' % i
+                self.crystalFieldFunction.setParameter(paramName, value[i])
+
         self._dirty_peaks = True
         self._dirty_spectra = True
 
     @property
     def Temperature(self):
-        return self._temperature
+        attrName = 'Temperatures' if self._isMultiSpectrum else 'Temperature'
+        return self.crystalFieldFunction.getAttributeValue(attrName)
 
     @Temperature.setter
     def Temperature(self, value):
-        lenval = len(value) if islistlike(value) else 1
-        lentemp = len(self._temperature) if islistlike(self._temperature) else 1
-        self._temperature = value
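+        """Set the temperature(s).
+
+        Switching between a single value and a list rebuilds the underlying function
+        (single-spectrum <-> multi-spectrum) via _remakeFunction, keeping the current
+        non-zero field parameter values. Sketch:
+            cf.Temperature = 44.0          # one spectrum
+            cf.Temperature = [44.0, 50.0]  # two spectra
+        """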
+        if islistlike(value) and len(value) == 1:
+            value = value[0]
+        if self._isMultiSpectrum:
+            if not islistlike(value):
+                # Try to keep current set of field parameters.
+                self._remakeFunction(float(value))
+                return
+            self.crystalFieldFunction.setAttributeValue('Temperatures', value)
+        else:
+            if islistlike(value):
+                self._remakeFunction(value)
+                return
+            self.crystalFieldFunction.setAttributeValue('Temperature', float(value))
         self._dirty_peaks = True
         self._dirty_spectra = True
-        if lenval != lentemp:
-            peakname = self.peaks[0].name if isinstance(self.peaks, list) else self.peaks.name
-            self.setPeaks(peakname)
 
     @property
     def FWHM(self):
-        return self._FWHM
+        attrName = 'FWHMs' if self._isMultiSpectrum else 'FWHM'
+        fwhm = self.crystalFieldFunction.getAttributeValue(attrName)
+        if self._isMultiSpectrum:
+            nDatasets = len(self.Temperature)
+            if len(fwhm) != nDatasets:
+                return list(fwhm) * nDatasets
+        return fwhm
 
     @FWHM.setter
     def FWHM(self, value):
-        self._FWHM = value
+        if islistlike(value) and len(value) == 1:
+            value = value[0]
+        if self._isMultiSpectrum:
+            if not islistlike(value):
+                value = [value] * self.NumberOfSpectra
+            self.crystalFieldFunction.setAttributeValue('FWHMs', value)
+        else:
+            if islistlike(value):
+                raise ValueError('FWHM is expected to be a single floating point value')
+            self.crystalFieldFunction.setAttributeValue('FWHM', float(value))
+        self._dirty_spectra = True
+
+    @property
+    def FWHMVariation(self):
+        return self.crystalFieldFunction.getAttributeValue('FWHMVariation')
+
+    @FWHMVariation.setter
+    def FWHMVariation(self, value):
+        self.crystalFieldFunction.setAttributeValue('FWHMVariation', float(value))
         self._dirty_spectra = True
 
     def __getitem__(self, item):
-        return self._fieldParameters[item]
+        return self.crystalFieldFunction.getParameterValue(item)
 
     def __setitem__(self, key, value):
         self._dirty_spectra = True
-        self._fieldParameters[key] = value
+        self.crystalFieldFunction.setParameter(key, value)
 
     @property
     def ResolutionModel(self):
@@ -515,18 +453,93 @@ class CrystalField(object):
             self._resolutionModel = value
         else:
             self._resolutionModel = ResolutionModel(value)
+        if self._isMultiSpectrum:
+            if not self._resolutionModel.multi or self._resolutionModel.NumberOfSpectra != self.NumberOfSpectra:
+                raise RuntimeError('Resolution model is expected to have %s functions, found %s' %
+                                   (self.NumberOfSpectra, self._resolutionModel.NumberOfSpectra))
+            for i in range(self.NumberOfSpectra):
+                model = self._resolutionModel.model[i]
+                self.crystalFieldFunction.setAttributeValue('FWHMX%s' % i, model[0])
+                self.crystalFieldFunction.setAttributeValue('FWHMY%s' % i, model[1])
+        else:
+            model = self._resolutionModel.model
+            self.crystalFieldFunction.setAttributeValue('FWHMX', model[0])
+            self.crystalFieldFunction.setAttributeValue('FWHMY', model[1])
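+        # The resolution model holds (x, y) array pairs; they are copied into the FWHMX/FWHMY
+        # attributes (FWHMX<i>/FWHMY<i> per spectrum in the multi-spectrum case).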
 
     @property
     def FixAllPeaks(self):
-        return self._fixAllPeaks
+        return self.crystalFieldFunction.getAttributeValue('FixAllPeaks')
 
     @FixAllPeaks.setter
     def FixAllPeaks(self, value):
-        self._fixAllPeaks = value
+        self.crystalFieldFunction.setAttributeValue('FixAllPeaks', value)
+
+    @property
+    def PeakShape(self):
+        return self.crystalFieldFunction.getAttributeValue('PeakShape')
+
+    @PeakShape.setter
+    def PeakShape(self, value):
+        self.crystalFieldFunction.setAttributeValue('PeakShape', value)
 
     @property
     def NumberOfSpectra(self):
-        return len(self._temperature)
+        return self.crystalFieldFunction.getNumberDomains()
+
+    @property
+    def NPeaks(self):
+        return self.crystalFieldFunction.getAttributeValue('NPeaks')
+
+    @NPeaks.setter
+    def NPeaks(self, value):
+        self.crystalFieldFunction.setAttributeValue('NPeaks', value)
+
+    @property
+    def peaks(self):
+        return self._peaks
+
+    @property
+    def background(self):
+        return self._background
+
+    @background.setter
+    def background(self, value):
+        """
+        Define the background function.
+        Args:
+            value: an instance of the function.Background class; in a multi-spectrum case
+                the same background definition is applied to every spectrum
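+        Example (an illustrative sketch; 'Gaussian' and 'LinearBackground' are standard
+        fit functions used here only for illustration):
+            cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=1),
+                                       background=Function('LinearBackground', A0=1.0))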
+        """
+        from .function import Background
+        from mantid.simpleapi import FunctionFactory
+        if self._background is not None:
+            raise ValueError('Background has been set already')
+        if not isinstance(value, Background):
+            raise TypeError('Expected a Background object, found %s' % str(value))
+        if not self._isMultiSpectrum:
+            fun_str = value.toString() + ';' + str(self.function)
+            self.function = FunctionFactory.createInitialized(fun_str)
+            self._background = self._makeBackgroundObject(value)
+            self._setPeaks()
+        else:
+            self.function.setAttributeValue("Background", value.toString())
+            self._background = []
+            for ispec in range(self.NumberOfSpectra):
+                prefix = 'f%s.' % ispec
+                self._background.append(self._makeBackgroundObject(value, prefix))
+
+    def _makeBackgroundObject(self, value, prefix=''):
+        from .function import Background, Function
+        if value.peak is not None and value.background is not None:
+            peak = Function(self.function, prefix=prefix + 'f0.f0.')
+            background = Function(self.function, prefix=prefix + 'f0.f1.')
+        elif value.peak is not None:
+            peak = Function(self.function, prefix=prefix + 'f0.')
+            background = None
+        elif value.background is not None:
+            peak = None
+            background = Function(self.function, prefix=prefix + 'f0.')
+        return Background(peak=peak, background=background)
 
     @property
     def PhysicalProperty(self):
@@ -537,21 +550,37 @@ class CrystalField(object):
         from .function import PhysicalProperties
         vlist = value if islistlike(value) else [value]
         if all([isinstance(pp, PhysicalProperties) for pp in vlist]):
+            nOldPP = len(self._physprop) if islistlike(self._physprop) else (0 if self._physprop is None else 1)
             self._physprop = value
         else:
             errmsg = 'PhysicalProperty input must be a PhysicalProperties'
             errmsg += ' instance or a list of such instances'
             raise ValueError(errmsg)
+        # If a spectrum (temperature) is already defined, or multiple physical properties
+        # given, redefine the CrystalFieldMultiSpectrum function.
+        if not self.isPhysicalPropertyOnly or islistlike(self.PhysicalProperty):
+            tt = self.Temperature if islistlike(self.Temperature) else [self.Temperature]
+            ww = list(self.FWHM) if islistlike(self.FWHM) else [self.FWHM]
+            # Last n-set of temperatures correspond to PhysicalProperties
+            if nOldPP > 0:
+                tt = tt[:-nOldPP]
+            # Removes 'negative' temperature, which is a flag for no INS dataset
+            tt = [val for val in tt if val > 0]
+            pptt = [0 if val.Temperature is None else val.Temperature for val in vlist]
+            self._remakeFunction(list(tt)+pptt)
+            if len(tt) > 0 and len(pptt) > 0:
+                ww += [0] * len(pptt)
+            self.FWHM = ww
+            ppids = [pp.TypeID for pp in vlist]
+            self.function.setAttributeValue('PhysicalProperties', [0]*len(tt)+ppids)
+            for attribs in [pp.getAttributes(i+len(tt)) for i, pp in enumerate(vlist)]:
+                for item in attribs.items():
+                    self.function.setAttributeValue(item[0], item[1])
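+        # Sketch (illustrative): cf.PhysicalProperty = PhysicalProperties('chi') appends a
+        # susceptibility dataset; its temperature is concatenated onto the INS temperatures
+        # so the multi-spectrum function sees one domain per dataset.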
 
     @property
     def isPhysicalPropertyOnly(self):
-        return self.Temperature is None and self.PhysicalProperty
-
-    @property
-    def numPhysicalPropertyData(self):
-        if self._physprop:
-            return len(self._physprop) if islistlike(self._physprop) else 1
-        return 0
+        return (not islistlike(self.Temperature) and self.Temperature < 0
+                and self.PhysicalProperty is not None)
 
     def ties(self, **kwargs):
         """Set ties on the field parameters.
@@ -561,7 +590,7 @@ class CrystalField(object):
                 tie(B20 = 0.1, IB23 = '2*B23')
         """
         for tie in kwargs:
-            self._fieldTies[tie] = kwargs[tie]
+            self.crystalFieldFunction.tie(tie, str(kwargs[tie]))
 
     def constraints(self, *args):
         """
@@ -570,28 +599,7 @@ class CrystalField(object):
         @param args: A list of constraints. For example:
                 constraints('B00 > 0', '0.1 < B43 < 0.9')
         """
-        self._fieldConstraints += args
-
-    def setPeaks(self, name):
-        """Define the shape of the peaks and create PeakFunction instances."""
-        from .function import PeaksFunction
-        if self._temperature is None or not isinstance(self._temperature, list):
-            self.peaks = PeaksFunction(name, firstIndex=0)
-        else:
-            self.peaks = [PeaksFunction(name, firstIndex=1) for _ in self._temperature]
-
-    def setBackground(self, peak=None, background=None):
-        from .function import Background
-        if isinstance(self._temperature, list):
-            self.background = len(self._temperature) * Background(peak=peak, background=background)
-        else:
-            self.background = Background(peak=peak, background=background)
-
-    def getPeak(self, i=0):
-        if isinstance(self.peaks, list):
-            return self.peaks[i]
-        else:
-            return self.peaks
+        self.crystalFieldFunction.addConstraints(','.join(args))
 
     def getEigenvalues(self):
         self._calcEigensystem()
@@ -647,6 +655,9 @@ class CrystalField(object):
             wksp = i
             i = 0
 
+        if (self.Temperature[i] if islistlike(self.Temperature) else self.Temperature) < 0:
+            raise RuntimeError('You must first define a temperature for the spectrum')
+
         # Workspace is given, always calculate
         if wksp is None:
             xArray = None
@@ -731,7 +742,6 @@ class CrystalField(object):
         # _calcSpectrum updates parameters and susceptibility has a 'Lambda' parameter which other
         # CF functions don't have. This causes problems if you want to calculate another quantity after
         x, y = self._getPhysProp(PhysicalProperties('chi', *args, **kwargs), workspace, ws_index)
-        self._fieldParameters.pop('Lambda', None)
         return x, y
 
     def getMagneticMoment(self, *args, **kwargs):
@@ -819,7 +829,7 @@ class CrystalField(object):
         createWS.initialize()
 
         xArray, yArray = self.getSpectrum(i, workspace, ws_index)
-        ws_name = name if name is not None else 'CrystalField_%s' % self._ion
+        ws_name = name if name is not None else 'CrystalField_%s' % self.Ion
 
         if isinstance(i, int):
             if workspace is None:
@@ -848,97 +858,19 @@ class CrystalField(object):
             createWS.execute()
             plotSpectrum(ws_name, 0)
 
-    def _setDefaultTies(self):
-        for name in self.field_parameter_names:
-            if name not in self._fieldParameters:
-                self._fieldTies[name] = '0'
-
-    def getFieldTies(self):
-        ties = ['%s=%s' % item for item in self._fieldTies.items()]
-        return ','.join(ties)
-
-    def getFieldConstraints(self):
-        return ','.join(self._fieldConstraints)
-
-    def updateParameters(self, func):
-        """
-        Update values of the field and peaks parameters.
-        @param func: A IFunction object containing new parameter values.
-        """
-        for i in range(func.nParams()):
-            par = func.parameterName(i)
-            value = func.getParameterValue(i)
-            if par == 'IntensityScaling':
-                self._intensityScaling = value
-            else:
-                match = re.match(FN_PATTERN, par)
-                if match:
-                    i = int(match.group(1))
-                    par = match.group(2)
-                    self.peaks.param[i][par] = value
-                else:
-                    self._fieldParameters[par] = value
-
     def update(self, func):
         """
         Update values of the fitting parameters.
         @param func: A IFunction object containing new parameter values.
         """
-        from mantid.api import CompositeFunction
-        if isinstance(func, CompositeFunction):
-            nFunc = len(func)
-            if nFunc == 3:
-                self.background.update(func[0], func[1])
-                self.updateParameters(func[2])
-            elif nFunc == 2:
-                self.background.update(func[0])
-                self.updateParameters(func[1])
+        self.function = func
+        if self._background is not None:
+            if isinstance(self._background, list):
+                for background in self._background:
+                    background.update(func)
             else:
-                raise RuntimeError('CompositeFunuction cannot have more than 3 components.')
-        else:
-            self.updateParameters(func)
-
-    def update_multi(self, func):
-        """
-        Update values of the fitting parameters in case of a multi-spectrum function.
-        @param func: A IFunction object containing new parameter values.
-        """
-        from .function import Function
-        for i in range(func.nParams()):
-            par = func.parameterName(i)
-            value = func.getParameterValue(i)
-            match = re.match(FN_MS_PATTERN, par)
-            if match:
-                ispec = int(match.group(1))
-                ipeak = int(match.group(2))
-                par = match.group(3)
-                if ipeak == 0:
-                    if self.background is None:
-                        self.setBackground(background=Function(self.default_background))
-                    background = (self.background[ispec]
-                                  if islistlike(self.background) else self.background)
-                    bgMatch = re.match(FN_PATTERN, par)
-                    if bgMatch:
-                        i = int(bgMatch.group(1))
-                        par = bgMatch.group(2)
-                        if i == 0:
-                            background.peak.param[par] = value
-                        else:
-                            background.background.param[par] = value
-                    else:
-                        if background.peak is not None:
-                            background.peak.param[par] = value
-                        elif background.background is not None:
-                            background.background.param[par] = value
-                        else:
-                            raise RuntimeError('Background is undefined in CrystalField instance.')
-                else:
-                    if islistlike(self.peaks):
-                        self.peaks[ispec].param[ipeak - 1][par] = value
-                    else:
-                        self.peaks.param[ipeak - 1][par] = value
-            else:
-                self._fieldParameters[par] = value
+                self._background.update(func)
+        self._setPeaks()
 
     def calc_xmin_xmax(self, i):
         """Calculate the x-range containing interesting features of a spectrum (for plotting)
@@ -956,58 +888,6 @@ class CrystalField(object):
         x_max += deltaX
         return x_min, x_max
 
-    def check_consistency(self):
-        """ Checks that list input variables are consistent """
-        if not self._temperature:
-            return 0
-        # Number of datasets is implied by temperature.
-        nDataset = len(self._temperature) if islistlike(self._temperature) else 1
-        nFWHM = len(self._FWHM) if islistlike(self._FWHM) else 1
-        nIntensity = len(self._intensityScaling) if islistlike(self._intensityScaling) else 1
-        nPeaks = len(self.peaks) if islistlike(self.peaks) else 1
-        # Consistent if temperature, FWHM, intensityScale are lists with same len
-        # Or if FWHM, intensityScale are 1-element list or scalar
-        if (nFWHM != nDataset and nFWHM != 1) or (nIntensity != nDataset and nIntensity != 1):
-            errmsg = 'The Temperature, FWHM, and IntensityScaling properties have different '
-            errmsg += 'number of elements implying different number of spectra.'
-            raise ValueError(errmsg)
-        # This should not occur, but may do if the user changes the temperature(s) after
-        # initialisation. In which case, we reset the peaks, giving a warning.
-        if nPeaks != nDataset:
-            from .function import PeaksFunction
-            errmsg = 'Internal inconsistency between number of spectra and list of '
-            errmsg += 'temperatures. Changing number of spectra to match temperature. '
-            errmsg += 'This may reset some peaks constraints / limits'
-            warnings.warn(errmsg, RuntimeWarning)
-            if len(self.peaks) > nDataset:           # Truncate
-                self.peaks = self.peaks[0:nDataset]
-            else:                                    # Append empty PeaksFunctions
-                for i in range(len(self.peaks), nDataset):
-                    self.peaks.append(PeaksFunction(self.peaks[0].name(), firstIndex=0))
-        # Convert to all scalars if only one dataset
-        if nDataset == 1:
-            if islistlike(self._temperature) and self._temperature is not None:
-                self._temperature = self._temperature[0]
-                if islistlike(self.peaks):
-                    self.peaks = self.peaks[0]
-            if islistlike(self._FWHM) and self._FWHM is not None:
-                self._FWHM = self._FWHM[0]
-            if islistlike(self._intensityScaling) and self._intensityScaling is not None:
-                self._intensityScaling = self._intensityScaling[0]
-        # Convert to list of same size if multidatasets
-        else:
-            if nFWHM == 1 and self._FWHM is not None:
-                if islistlike(self._FWHM):
-                    self._FWHM *= nDataset
-                else:
-                    self._FWHM = nDataset * [self._FWHM]
-            if nIntensity == 1 and self._intensityScaling is not None:
-                if islistlike(self._intensityScaling):
-                    self._intensityScaling *= nDataset
-                else:
-                    self._intensityScaling = nDataset * [self._intensityScaling]
-        return nDataset
-
     def __add__(self, other):
         if isinstance(other, CrystalFieldMulti):
             return other.__radd__(self)
@@ -1025,32 +905,28 @@ class CrystalField(object):
 
     def _getTemperature(self, i):
         """Get temperature value for i-th spectrum."""
-        if self._temperature is None:
-            raise RuntimeError('Temperature must be set.')
-        if isinstance(self._temperature, float) or isinstance(self._temperature, int):
+        if not self._isMultiSpectrum:
             if i != 0:
                 raise RuntimeError('Cannot evaluate spectrum %s. Only 1 temperature is given.' % i)
-            return float(self._temperature)
+            return float(self.Temperature)
         else:
-            nTemp = len(self._temperature)
+            temperatures = self.Temperature
+            nTemp = len(temperatures)
             if -nTemp <= i < nTemp:
-                return float(self._temperature[i])
+                return float(temperatures[i])
             else:
                 raise RuntimeError('Cannot evaluate spectrum %s. Only %s temperatures are given.' % (i, nTemp))
 
     def _getFWHM(self, i):
         """Get default FWHM value for i-th spectrum."""
-        if self._FWHM is None:
-            raise RuntimeError('Default FWHM must be set.')
-        if isinstance(self._FWHM, float) or isinstance(self._FWHM, int):
+        if not self._isMultiSpectrum:
             # if i != 0 assume that value for all spectra
-            return float(self._FWHM)
+            return float(self.FWHM)
         else:
-            nFWHM = len(self._FWHM)
-            if i >= -nFWHM and i < nFWHM:
-                return float(self._FWHM[i])
-            elif nFWHM == 1:
-                return self._FWHM[0]
+            fwhm = self.FWHM
+            nFWHM = len(fwhm)
+            if -nFWHM <= i < nFWHM:
+                return float(fwhm[i])
             else:
                 raise RuntimeError('Cannot get FWHM for spectrum %s. Only %s FWHM are given.' % (i, nFWHM))
 
@@ -1068,6 +944,29 @@ class CrystalField(object):
             return self.peaks[i]
         return self.peaks
 
+    def _getFieldParameters(self):
+        """
+        Get the values of non-zero field parameters.
+        Returns:
+            a dict with name: value pairs.
+        """
+        params = {}
+        for name in self.field_parameter_names:
+            value = self.crystalFieldFunction.getParameterValue(name)
+            if value != 0.0:
+                params[name] = value
+        return params
+
+    def _getFieldTies(self):
+        import re
+        ties = re.search(r'ties=\((.*?)\)', str(self.crystalFieldFunction))
+        return ties.group(1) if ties else ''
+
+    def _getFieldConstraints(self):
+        import re
+        constraints = re.search(r'constraints=\((.*?)\)', str(self.crystalFieldFunction))
+        return constraints.group(1) if constraints else ''
+
     def _getPhysProp(self, ppobj, workspace, ws_index):
         """
         Returns a physical properties calculation
@@ -1101,9 +1000,9 @@ class CrystalField(object):
         """
         if self._dirty_eigensystem:
             import CrystalField.energies as energies
-            if self._nre < -99:
-                raise RuntimeError('J value ' + str(-self._nre / 2) + ' is too large.')
-            self._eigenvalues, self._eigenvectors, self._hamiltonian = energies.energies(self._nre, **self._fieldParameters)
+            nre = self.ion_nre_map[self.Ion]
+            self._eigenvalues, self._eigenvectors, self._hamiltonian = \
+                energies.energies(nre, **self._getFieldParameters())
             self._dirty_eigensystem = False
 
     def _calcPeaksList(self, i):
@@ -1135,16 +1034,13 @@ class CrystalField(object):
         alg.setProperty('WorkspaceIndex', ws_index)
         alg.setProperty('OutputWorkspace', 'dummy')
         alg.execute()
-        fun = alg.getProperty('Function').value
-        if not self._isMultiSpectra():
-            self.update(fun)
         out = alg.getProperty('OutputWorkspace').value
         # Create copies of the x and y because `out` goes out of scope when this method returns
         # and x and y get deallocated
         return np.array(out.readX(0)), np.array(out.readY(1))
 
-    def _isMultiSpectra(self):
-        return islistlike(self._temperature)
+    def isMultiSpectrum(self):
+        return self._isMultiSpectrum
 
 
 class CrystalFieldSite(object):
@@ -1221,6 +1117,11 @@ class CrystalFieldMulti(object):
         return ','.join(ties)
 
     def getSpectrum(self, i=0, workspace=None, ws_index=0):
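+        # A 'negative' temperature on a site flags that no INS dataset has been defined for it.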
+        tt = []
+        for site in self.sites:
+            tt = tt + (list(site.Temperature) if islistlike(site.Temperature) else [site.Temperature])
+        if any([val < 0 for val in tt]):
+            raise RuntimeError('You must first define a temperature for the spectrum')
         largest_abundance= max(self.abundances)
         if workspace is not None:
             xArray, yArray = self.sites[0].getSpectrum(i, workspace, ws_index)
@@ -1313,23 +1214,29 @@ class CrystalFieldMulti(object):
             a.PhysicalProperty = value
 
     @property
-    def numPhysicalPropertyData(self):
-        num_spec = []
-        for a in self.sites:
-            num_spec.append(a.numPhysicalPropertyData)
-        if len(set(num_spec)) > 1:
-            raise ValueError('Number of physical properties datasets for each site not consistent')
-        return num_spec[0]
-
-    def check_consistency(self):
-        """ Checks that list input variables are consistent """
+    def NumberOfSpectra(self):
+        """ Returns the number of expected workspaces """
         num_spec = []
         for site in self.sites:
-            num_spec.append(site.check_consistency())
+            num_spec.append(site.NumberOfSpectra)
         if len(set(num_spec)) > 1:
             raise ValueError('Number of spectra for each site not consistent with each other')
         return num_spec[0]
 
+    @property
+    def Temperature(self):
+        tt = []
+        for site in self.sites:
+            tt.append([val for val in (site.Temperature if islistlike(site.Temperature) else [site.Temperature])])
+        if len(set([tuple(val) for val in tt])) > 1:
+            raise ValueError('Temperatures of spectra for each site not consistent with each other')
+        return tt[0]
+
+    @Temperature.setter
+    def Temperature(self, value):
+        for site in self.sites:
+            site.Temperature = value
+
     def __add__(self, other):
         if isinstance(other, CrystalFieldMulti):
             cfm = CrystalFieldMulti()
@@ -1462,6 +1369,10 @@ class CrystalFieldFit(object):
         """
         from mantid.api import AlgorithmManager
         fun = self.model.makeSpectrumFunction()
+        if 'CrystalFieldMultiSpectrum' in fun:
+            # Hack to ensure that 'PhysicalProperties' attribute is first
+            # otherwise it won't set up other attributes properly
+            fun = re.sub(r'(name=.*?,)(.*?)(PhysicalProperties=\(.*?\),)',r'\1\3\2', fun)
         alg = AlgorithmManager.createUnmanaged('EstimateFitParameters')
         alg.initialize()
         alg.setProperty('Function', fun)
@@ -1481,6 +1392,8 @@ class CrystalFieldFit(object):
         """
         from mantid.api import AlgorithmManager
         fun = self.model.makeMultiSpectrumFunction()
+        if 'CrystalFieldMultiSpectrum' in fun:
+            fun = re.sub(r'(name=.*?,)(.*?)(PhysicalProperties=\(.*?\),)',r'\1\3\2', fun)
         alg = AlgorithmManager.createUnmanaged('EstimateFitParameters')
         alg.initialize()
         alg.setProperty('Function', fun)
@@ -1493,7 +1406,7 @@ class CrystalFieldFit(object):
             alg.setProperty(param, kwargs[param])
         alg.execute()
         function = alg.getProperty('Function').value
-        self.model.update_multi(function)
+        self.model.update(function)
         self._function = function
 
     def _fit_single(self):
@@ -1508,6 +1421,8 @@ class CrystalFieldFit(object):
                 fun = self.model.makeSpectrumFunction()
         else:
             fun = str(self._function)
+        if 'CrystalFieldMultiSpectrum' in fun:
+            fun = re.sub(r'(name=.*?,)(.*?)(PhysicalProperties=\(.*?\),)',r'\1\3\2', fun)
         alg = AlgorithmManager.createUnmanaged('Fit')
         alg.initialize()
         alg.setProperty('Function', fun)
@@ -1525,6 +1440,8 @@ class CrystalFieldFit(object):
         """
         from mantid.api import AlgorithmManager
         fun = self.model.makeMultiSpectrumFunction()
+        if 'CrystalFieldMultiSpectrum' in fun:
+            fun = re.sub(r'(name=.*?,)(.*?)(PhysicalProperties=\(.*?\),)',r'\1\3\2', fun)
         alg = AlgorithmManager.createUnmanaged('Fit')
         alg.initialize()
         alg.setProperty('Function', fun)
@@ -1537,7 +1454,7 @@ class CrystalFieldFit(object):
         self._set_fit_properties(alg)
         alg.execute()
         function = alg.getProperty('Function').value
-        self.model.update_multi(function)
+        self.model.update(function)
         self.model.chi2 = alg.getProperty('OutputChi2overDoF').value
 
     def _set_fit_properties(self, alg):
@@ -1546,7 +1463,7 @@ class CrystalFieldFit(object):
 
     def check_consistency(self):
         """ Checks that list input variables are consistent """
-        num_ws = self.model.check_consistency() + self.model.numPhysicalPropertyData
+        num_ws = self.model.NumberOfSpectra
         errmsg = 'Number of input workspaces not consistent with model'
         if islistlike(self._input_workspace):
             if num_ws != len(self._input_workspace):
@@ -1556,3 +1473,7 @@ class CrystalFieldFit(object):
                 self._input_workspace = self._input_workspace[0]
         elif num_ws != 1:
             raise ValueError(errmsg)
+        if not self.model.isPhysicalPropertyOnly:
+            tt = self.model.Temperature
+            if any([val < 0 for val in (tt if islistlike(tt) else [tt])]):
+                raise RuntimeError('You must first define a temperature for the spectrum')
diff --git a/scripts/Inelastic/CrystalField/function.py b/scripts/Inelastic/CrystalField/function.py
index 0f802a985b255fcb74d1193a3daa7c823687bfc0..5d6f24e20778cd609b540287efacb76ca709709d 100644
--- a/scripts/Inelastic/CrystalField/function.py
+++ b/scripts/Inelastic/CrystalField/function.py
@@ -5,10 +5,46 @@ from six import string_types
 parNamePattern = re.compile(r'([a-zA-Z][\w.]+)')
 
 
+class FunctionParameters(object):
+    """
+    A helper class that simplifies access to parameters of nested composite fitting functions.
+    """
+    def __init__(self, function, prefix=''):
+        self.function = function
+        self.prefix = prefix
+
+    def __getitem__(self, name):
+        return self.function.getParameterValue(self.prefix + name)
+
+    def __setitem__(self, name, value):
+        self.function.setParameter(self.prefix + name, value)
+
+    def update(self, function):
+        self.function = function
+
+
+class FunctionAttributes(object):
+    """
+    A helper class that simplifies access to attributes of nested composite fitting functions.
+    """
+    def __init__(self, function, prefix=''):
+        self.function = function
+        self.prefix = prefix
+
+    def __getitem__(self, name):
+        return self.function.getAttributeValue(self.prefix + name)
+
+    def __setitem__(self, name, value):
+        self.function.setAttributeValue(self.prefix + name, value)
+
+    def update(self, function):
+        self.function = function
+
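+# Illustrative sketch: the helpers address nested parameters through a prefix, e.g.
+#   from mantid.simpleapi import FunctionFactory
+#   comp = FunctionFactory.createInitialized('name=Gaussian,Sigma=1;name=LinearBackground,A0=5')
+#   FunctionParameters(comp, 'f1.')['A0']      # -> 5.0
+#   FunctionParameters(comp, 'f0.')['Sigma'] = 2.0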
+
 class Function(object):
     """A helper object that simplifies getting and setting parameters of a simple named function."""
 
-    def __init__(self, name, **kwargs):
+    def __init__(self, name_or_function, **kwargs):
         """
         Initialise new instance.
-        @param name: A valid name registered with the FunctionFactory.
+        @param name_or_function: A valid name registered with the FunctionFactory
+            or an existing function object.
@@ -17,37 +53,24 @@ class Function(object):
                     f = Function('TabulatedFunction', Scaling=2.0)
                     f.attr['Workspace'] = 'workspace_with_data'
         """
-        self._name = name
+        from mantid.simpleapi import FunctionFactory
+        if isinstance(name_or_function, str):
+            self.function = FunctionFactory.createFunction(name_or_function)
+        else:
+            self.function = name_or_function
+        if 'prefix' in kwargs:
+            self.prefix = kwargs['prefix']
+            del kwargs['prefix']
+        else:
+            self.prefix = ''
         # Function attributes.
-        self._attrib = {}
+        self._attrib = FunctionAttributes(self.function, self.prefix)
         # Function parameters.
-        self._params = {}
+        self._params = FunctionParameters(self.function, self.prefix)
+        # The remaining keyword arguments are treated as function parameters
         for param in kwargs:
             self._params[param] = kwargs[param]
 
-        self._ties = {}
-        self._constraints = []
-
-    def copyFrom(self, attrib, params, ties, constraints):
-        """Make shallow copies of the member collections"""
-        from copy import copy
-        self._attrib = copy(attrib)
-        self._params = copy(params)
-        self._ties = copy(ties)
-        self._constraints = copy(constraints)
-
-    def clone(self):
-        """Make a copy of self."""
-        function = Function(self._name)
-        # Make shallow copies of the member collections
-        function.copyFrom(self._attrib, self._params, self._ties, self._constraints)
-        return function
-
-    @property
-    def name(self):
-        """Read only name of this function"""
-        return self._name
-
     @property
     def attr(self):
         return self._attrib
@@ -63,8 +86,8 @@ class Function(object):
             the value is a tie string or a number. For example:
                 tie(A0 = 0.1, A1 = '2*A0')
         """
-        for tie in kwargs:
-            self._ties[tie] = kwargs[tie]
+        for param in kwargs:
+            self.function.tie(self.prefix + param, str(kwargs[param]))
 
     def constraints(self, *args):
         """
@@ -73,196 +96,128 @@ class Function(object):
         @param args: A list of constraints. For example:
                 constraints('A0 > 0', '0.1 < A1 < 0.9')
         """
-        self._constraints += args
+        for arg in args:
+            constraint = re.sub(parNamePattern, '%s\\1' % self.prefix, arg)
+            self.function.addConstraints(constraint)
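+        # For example, with prefix 'f1.' the constraint '0 < Sigma < 2' is rewritten to
+        # '0 < f1.Sigma < 2' before being passed to addConstraints.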
 
     def toString(self):
         """Create function initialisation string"""
-        attrib = ['%s=%s' % item for item in self._attrib.items()] + \
-                 ['%s=%s' % item for item in self._params.items()]
-        if len(attrib) > 0:
-            out = 'name=%s,%s' % (self._name, ','.join(attrib))
-        else:
-            out = 'name=%s' % self._name
-        ties = ','.join(['%s=%s' % item for item in self._ties.items()])
-        if len(ties) > 0:
-            out += ',ties=(%s)' % ties
-        constraints = ','.join(self._constraints)
-        if len(constraints) > 0:
-            out += ',constraints=(%s)' % constraints
-        return out
-
-    def paramString(self, prefix):
-        """Create a string with only parameters and attributes settings.
-            The prefix is prepended to all attribute names.
-        """
-        attrib = ['%s%s=%s' % ((prefix,) + item) for item in self._attrib.items()] + \
-                 ['%s%s=%s' % ((prefix,) + item) for item in self._params.items()]
-        return ','.join(attrib)
-
-    def tiesString(self, prefix):
-        """Create a string with only ties settings.
-            The prefix is prepended to all parameter names.
-        """
-        ties = ['%s%s=%s' % ((prefix,) + item) for item in self._ties.items()]
-        return ','.join(ties)
+        if self.prefix != '':
+            raise RuntimeError('Cannot convert a part of a composite function to a string')
+        return str(self.function)
 
-    def constraintsString(self, prefix):
-        """Create a string with only constraints settings.
-            The prefix is prepended to all parameter names.
-        """
-        if len(prefix) > 0:
-            constraints = []
-            for constraint in self._constraints:
-                constraint = re.sub(parNamePattern, prefix + '\\1', constraint)
-                constraints.append(constraint)
-        else:
-            constraints = self._constraints
-        return ','.join(constraints)
-
-    def update(self, func):
+    def update(self, function):
         """
         Update values of the fitting parameters.
-        @param func: A IFunction object containing new parameter values.
+        @param function: An IFunction object containing new parameter values.
         """
-        for i in range(func.nParams()):
-            par = func.parameterName(i)
-            self._params[par] = func.getParameterValue(i)
+        self._attrib.update(function)
+        self._params.update(function)
 
 
 class CompositeProperties(object):
     """
-    A dictionary of dictionaries of function properties: attributes or parameters.
-    It mimics properties of a CompositeFunction: the key is a function index and the value
-    id a map 'param_name' -> param_value.
-
-    Example:
-        {
-          0: {'Height': 100, 'Sigma': 1.0}, # Parameters of the first function
-          1: {'Height': 120, 'Sigma': 2.0}, # Parameters of the second function
-          5: {'Height': 300, 'Sigma': 3.0}, # Parameters of the sixth function
-          ...
-        }
+    A helper class that simplifies access to the attributes and parameters of a composite function.
     """
+    def __init__(self, function, prefix, kind, first_index):
+        """
+        Constructor.
+        Args:
+            function: a function that this object provides access to
+            prefix: a prefix that is prepended to property names. This makes it easier to access
+                    parameters of a nested composite function.
+            kind: the kind of properties accessed: 'attributes' or 'parameters'
+            first_index: shifts the index of a member function
+        """
+        self.function = function
+        self.prefix = prefix
+        self.PropertyType = FunctionAttributes if kind == 'attributes' else FunctionParameters
+        self.first_index = first_index
+
+    def __getitem__(self, i):
+        """
+        Get a FunctionParameters or FunctionAttributes object that give access to properties of the i-th
+        member function (shifted by self.firstIndex).
+
+        For example:
+            function = FunctionFactory.createInitialized('name=Gaussian,Sigma=1;name=Gaussian,Sigma=2')
+            params = CompositeProperties(function, '', 'parameters', 0)
+            assert params[0]['Sigma'] == 1
+            assert params[1]['Sigma'] == 2
+            params[1]['Sigma'] = 3
+            assert params[1]['Sigma'] == 3
+        Args:
+            i: index of a member function to get/set parameters
+        Returns:
+            FunctionParameters or FunctionAttributes object.
+        """
+        return self.PropertyType(self.function, self.prefix + 'f%s.' % (i + self.first_index))
+
+    def update(self, function):
+        self.function = function
+
+    def ties(self, ties_dict):
+        """Set ties on the parameters.
 
-    def __init__(self):
-        self._properties = {}
-
-    def __getitem__(self, item):
-        """Get a map of properties for a function number <item>"""
-        if item not in self._properties:
-            self._properties[item] = {}
-        return self._properties[item]
-
-    def getSize(self):
-        """Get number of maps (functions) defined here"""
-        keys = list(self._properties.keys())
-        if len(keys) > 0:
-            return max(keys) + 1
-        return 0
-
-    def toStringList(self):
-        """Format all properties into a list of strings where each string is a comma-separated
-        list of name=value pairs.
+        :param ties_dict: Ties as name=value pairs: name is a parameter name,
+            the value is a tie string or a number. For example:
+                ties({'A0': 0.1, 'A1': '2*A0'})
         """
-        prop_list = []
-        for i in range(self.getSize()):
-            if i in self._properties:
-                props = self._properties[i]
-                prop_list.append(','.join(['%s=%s' % item for item in sorted(props.items())]))
-            else:
-                prop_list.append('')
-        return prop_list
+        for param, tie in ties_dict.items():
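+            # Qualify bare parameter names inside the tie expression with this object's prefix,
+            # e.g. a tie of '2*A0' becomes '2*f0.A0' when the prefix is 'f0.' (names illustrative).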
+            tie = re.sub(parNamePattern, '%s\\1' % self.prefix, tie)
+            self.function.tie(self.prefix + param, tie)
 
-    def toCompositeString(self, prefix, shift=0):
-        """Format all properties as a comma-separated list of name=value pairs where name is formatted
-        in the CompositeFunction style.
+    def constraints(self, *args):
+        """
+        Set constraints for the parameters.
 
-        Example:
-            'f0.Height=100,f0.Sigma=1.0,f1.Height=120,f1.Sigma=2.0,f5.Height=300,f5.Sigma=3.0'
+        :param args: A list of constraints. For example:
+                constraints('A0 > 0', '0.1 < A1 < 0.9')
         """
-        out = ''
-        for i in self._properties:
-            fullPrefix = '%sf%s.' % (prefix, i + shift)
-            props = self._properties[i]
-            if len(out) > 0:
-                out += ','
-            out += ','.join(['%s%s=%s' % ((fullPrefix,) + item) for item in sorted(props.items())])
-        return out[:]
+        for arg in args:
+            constraint = re.sub(parNamePattern, '%s\\1' % self.prefix, arg)
+            self.function.addConstraints(constraint)
 
 
 class PeaksFunction(object):
     """A helper object that simplifies getting and setting parameters of a composite function
-    containing multiple peaks of the same type.
-
-    The object of this class has no access to the C++ fit function it represents. It means that
-    it doesn't know what attributes or parameters the function defines and relies on the user
-    to provide correct information.
-
-    @param name: A name of the individual peak function, such as 'Lorentzian' or 'Gaussian'.
-        If None then the default function is used (currently 'Lorentzian')
+    containing multiple peaks of the same spectrum.
     """
 
-    def __init__(self, name=None, firstIndex=0):
+    def __init__(self, function, prefix, first_index):
         """
         Constructor.
-
-        @param name: The name of the function of each peak.  E.g. Gaussian
-
-        @param firstIndex: Index of the first peak in the function. For a single spectrum
-                function it is 0, in a multi-spectral case it's 1.
+        :param function: A CrystalField function whose peaks we want to access.
+        :param prefix: a prefix of the parameters of the spectrum we want to access.
+        :param first_index: Index of the first peak
         """
-        # Name of the peaks
-        self._name = name if name is not None else 'Lorentzian'
         # Collection of all attributes
-        self._attrib = CompositeProperties()
+        self._attrib = CompositeProperties(function, prefix, 'attributes', first_index)
         # Collection of all parameters
-        self._params = CompositeProperties()
-        # Ties
-        self._ties = []
-        # Constraints
-        self._constraints = []
-        # Index of the first peak
-        self._firstIndex = firstIndex
-
-    @property
-    def name(self):
-        """Read only name of the peak function"""
-        return self._name
+        self._params = CompositeProperties(function, prefix, 'parameters', first_index)
 
     @property
     def attr(self):
-        """Get a dict of all currently set attributes.
-        Use this property to set or get an attribute.
-        You can only get an attribute that has been previously set via this property.
+        """Get or set the function attributes.
+        Returns a FunctionAttributes object that accesses the peaks' attributes.
         """
         return self._attrib
 
     @property
     def param(self):
-        """Get a dict of all currently set parameters
-        Use this property to set or get a parameter.
-        You can only get a parameter that has been previously set via this property.
-        Example:
-
-            fun = PeaksFunction('Gaussian')
-            # Set Sigma parameter of the second peak
-            peaks.param[1]['Sigma'] = 0.1
-            ...
-            # Get the value of a previously set parameter
-            sigma = peaks.param[1]['Sigma']
-            ...
-            # Trying to get a value that wasn't set results in an error
-            height = peaks[1]['Height'] # error
+        """Get or set the function parameters.
+        Returns a FunctionParameters object that accesses the peaks' parameters.
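+
+        Example (an illustrative sketch; 'Sigma' stands for any parameter the peak defines):
+            peaks.param[1]['Sigma'] = 0.1
+            sigma = peaks.param[1]['Sigma']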
         """
         return self._params
 
-    def ties(self, *ties):
+    def ties(self, ties_dict):
         """Set ties on the peak parameters.
 
-        @param ties: A list of ties. For example:
-                ties('f1.Sigma=0.1', 'f2.Sigma=2*f0.Sigma')
+        :param ties_dict: Ties as name=value pairs: name is a parameter name,
+              the value is a tie string or a number. For example:
+              ties({'f1.Sigma': '0.1', 'f2.Sigma': '2*f0.Sigma'})
         """
-        self._ties += ties
+        self._params.ties(ties_dict)
 
     def constraints(self, *constraints):
         """
@@ -271,7 +226,7 @@ class PeaksFunction(object):
         @param constraints: A list of constraints. For example:
                 constraints('f0.Sigma > 0', '0.1 < f1.Sigma < 0.9')
         """
-        self._constraints += constraints
+        self._params.constraints(*constraints)
 
     def tieAll(self, tie, iFirstN, iLast=-1):
         """
@@ -290,11 +245,13 @@ class PeaksFunction(object):
             start = iFirstN
             end = iLast + 1
         else:
-            start = self._firstIndex
-            end = iFirstN + self._firstIndex
-        pattern = 'f%s.' + tie
-        ties = [pattern % i for i in range(start, end)]
-        self.ties(*ties)
+            start = self._params.first_index
+            end = iFirstN + self._params.first_index
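+        # Split a tie such as 'Sigma=0.1' into the parameter name and the tied expression, then build
+        # one tie per peak index, e.g. {'f0.Sigma': '0.1', 'f1.Sigma': '0.1', ...} (values illustrative).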
+        name, expr = tuple(tie.split('='))
+        name = 'f%s.' + name.strip()
+        expr = expr.strip()
+        ties = {(name % i): expr for i in range(start, end)}
+        self.ties(ties)
 
     def constrainAll(self, constraint, iFirstN, iLast=-1):
         """
@@ -313,74 +270,12 @@ class PeaksFunction(object):
             start = iFirstN
             end = iLast + 1
         else:
-            start = self._firstIndex
-            end = iFirstN + self._firstIndex
+            start = self._params.first_index
+            end = iFirstN + self._params.first_index
 
         pattern = re.sub(parNamePattern, 'f%s.\\1', constraint)
         self.constraints(*[pattern % i for i in range(start, end)])
 
-    def nPeaks(self):
-        """Get the number of peaks"""
-        numPeaks = max(self._attrib.getSize(), self._params.getSize())
-        if numPeaks == 0:
-            raise RuntimeError('PeaksFunction has no defined parameters or attributes.')
-        return numPeaks
-
-    def toString(self):
-        """Create function initialisation string"""
-        numPeaks = self.nPeaks()
-        attribs = self._attrib.toStringList()
-        params = self._params.toStringList()
-        if len(attribs) < numPeaks:
-            attribs += [''] * (numPeaks - len(attribs))
-        if len(params) < numPeaks:
-            params += [''] * (numPeaks - len(params))
-        peaks = []
-        for i in range(numPeaks):
-            attrib = attribs[i]
-            param = params[i]
-            if len(attrib) != 0 or len(param) != 0:
-                if len(attrib) == 0:
-                    peaks.append('name=%s,%s' % (self._name, param))
-                elif len(param) == 0:
-                    peaks.append('name=%s,%s' % (self._name, attrib))
-                else:
-                    peaks.append('name=%s,%s,%s' % (self._name, attrib,param))
-            else:
-                peaks.append('name=%s' % self._name)
-        out = ';'.join(peaks)
-        if len(self._ties) > 0:
-            out += ';%s' % self.tiesString()
-        return out
-
-    def paramString(self, prefix='', shift=0):
-        """Format a comma-separated list of all peaks attributes and parameters in a CompositeFunction
-        style.
-        """
-        numAttributes = self._attrib.getSize()
-        numParams = self._params.getSize()
-        if numAttributes == 0 and numParams == 0:
-            return ''
-        elif numAttributes == 0:
-            return self._params.toCompositeString(prefix, shift)
-        elif numParams == 0:
-            return self._attrib.toCompositeString(prefix, shift)
-        else:
-            return '%s,%s' % (self._attrib.toCompositeString(prefix, shift),
-                              self._params.toCompositeString(prefix, shift))
-
-    def tiesString(self, prefix=''):
-        if len(self._ties) > 0:
-            ties = ','.join(self._ties)
-            return 'ties=(%s)' % re.sub(parNamePattern, prefix + '\\1', ties)
-        return ''
-
-    def constraintsString(self, prefix=''):
-        if len(self._constraints) > 0:
-            constraints = ','.join(self._constraints)
-            return 'constraints=(%s)' % re.sub(parNamePattern, prefix + '\\1', constraints)
-        return ''
-
 
 class Background(object):
     """Object representing spectrum background: a sum of a central peak and a
@@ -405,16 +300,6 @@ class Background(object):
             aCopy.background = self.background.clone()
         return aCopy
 
-    def __mul__(self, nCopies):
-        """Make expressions like Background(...) * 8 return a list of 8 identical backgrounds."""
-        copies = [self] * nCopies
-        return list(map(Background.clone, copies))
-        # return [self.clone() for i in range(nCopies)]
-
-    def __rmul__(self, nCopies):
-        """Make expressions like 2 * Background(...) return a list of 2 identical backgrounds."""
-        return self.__mul__(nCopies)
-
     def toString(self):
         if self.peak is None and self.background is None:
             return ''
@@ -422,57 +307,7 @@ class Background(object):
             return self.background.toString()
         if self.background is None:
             return self.peak.toString()
-        return '%s;%s' % (self.peak.toString(), self.background.toString())
-
-    def nameString(self):
-        if self.peak is None and self.background is None:
-            return ''
-        if self.peak is None:
-            return self.background.name
-        if self.background is None:
-            return self.peak.name
-        return '"name=%s;name=%s"' % (self.peak.name, self.background.name)
-
-    def paramString(self, prefix):
-        if self.peak is None and self.background is None:
-            return ''
-        if self.peak is None:
-            return self.background.paramString(prefix)
-        if self.background is None:
-            return self.peak.paramString(prefix)
-        return '%s,%s' % (self.peak.paramString(prefix + 'f0.'), self.background.paramString(prefix + 'f1.'))
-
-    def tiesString(self, prefix):
-        if self.peak is None and self.background is None:
-            return ''
-        if self.peak is None:
-            return self.background.tiesString(prefix)
-        if self.background is None:
-            return self.peak.tiesString(prefix)
-        peakString = self.peak.tiesString(prefix + 'f0.')
-        backgroundString = self.background.tiesString(prefix + 'f1.')
-        if len(peakString) == 0:
-            return backgroundString
-        elif len(backgroundString) == 0:
-            return peakString
-        else:
-            return '%s,%s' % (peakString, backgroundString)
-
-    def constraintsString(self, prefix):
-        if self.peak is None and self.background is None:
-            return ''
-        if self.peak is None:
-            return self.background.constraintsString(prefix)
-        if self.background is None:
-            return self.peak.constraintsString(prefix)
-        peakString = self.peak.constraintsString(prefix + 'f0.')
-        backgroundString = self.background.constraintsString(prefix + 'f1.')
-        if len(peakString) == 0:
-            return backgroundString
-        elif len(backgroundString) == 0:
-            return peakString
-        else:
-            return '%s,%s' % (peakString, backgroundString)
+        return '(%s;%s)' % (self.peak.toString(), self.background.toString())
 
     def update(self, func1, func2=None):
         """
@@ -538,6 +373,13 @@ class ResolutionModel:
         self._checkModel(model)
         self.model = model
 
+    @property
+    def NumberOfSpectra(self):
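+        # Number of spectra covered by this resolution model: 1 for a single-spectrum model,
+        # otherwise the number of single-spectrum models it was constructed from.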
+        if not self.multi:
+            return 1
+        else:
+            return len(self.model)
+
     def _checkModel(self, model):
         if not isinstance(model, tuple):
             raise RuntimeError('Resolution model must be a tuple of two arrays of floats.\n'
@@ -793,19 +635,20 @@ class PhysicalProperties(object):
                     out += ',Lambda=%s' % (self._lambda)
         return out
 
-    def envString(self, dataset=0):
-        """Create environment string for multidataset fitting"""
-        dataset = str(dataset)
-        out = ''
+    def getAttributes(self, dataset=None):
+        """Returns a dictionary of PhysicalProperties attributes for use with IFunction"""
+        dataset = '' if dataset is None else str(dataset)
+        out = {}
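+        # The keys mirror the old envString output, suffixed with the dataset index for multi-dataset
+        # fits, e.g. 'Unit0', 'Hdir0' (or 'powder0'), 'inverse0', 'Hmag0', 'Lambda0' (index 0 shown
+        # for illustration).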
         if self._typeid > 1:
-            out += 'Unit%s=%s' % (dataset, self._physpropUnit)
+            out['Unit%s' % (dataset)] = self._physpropUnit
             if 'powder' in self._hdir:
-                out += ',powder%s=1' % (dataset)
+                out['powder%s' % (dataset)] = 1
             else:
-                out += ',Hdir%s=(%s)' % (dataset, ','.join([str(hh) for hh in self._hdir]))
+                out['Hdir%s' % (dataset)] = [float(hh) for hh in self._hdir]  # needs to be a list
             if self._typeid != 3:  # either susceptibility or M(T)
-                out += ',inverse%s=%s' % (dataset, 1 if self._suscInverseFlag else 0)
-                out += (',Hmag%s=%s' % (dataset, self._hmag)) if self._typeid==3 else ''
+                out['inverse%s' % (dataset)] = 1 if self._suscInverseFlag else 0
+                if self._typeid == 3:
+                    out['Hmag%s' % (dataset)] = self._hmag
                 if self._typeid == 2 and self._lambda != 0:
-                    out += ',Lambda%s=%s' % (dataset, self._lambda)
+                    out['Lambda%s' % (dataset)] = self._lambda
         return out
diff --git a/scripts/Inelastic/IndirectReductionCommon.py b/scripts/Inelastic/IndirectReductionCommon.py
index 84ce8f7e568571ddb5120dfeb084b96c5a5defac..81266cf0a52563a025034d9a76474ea56aed8e26 100644
--- a/scripts/Inelastic/IndirectReductionCommon.py
+++ b/scripts/Inelastic/IndirectReductionCommon.py
@@ -629,7 +629,7 @@ def plot_reduction(workspace_name, plot_type):
         from mantidplot import plotSpectrum
         num_spectra = mtd[workspace_name].getNumberHistograms()
         try:
-            plotSpectrum(workspace_name, range(0, num_spectra), error_bars=True)
+            plotSpectrum(workspace_name, range(0, num_spectra))
         except RuntimeError:
             logger.notice('Spectrum plotting canceled by user')
 
diff --git a/scripts/Inelastic/vesuvio/commands.py b/scripts/Inelastic/vesuvio/commands.py
index 46d71cd3b24d0f66b51844e0de9e6f64ef378090..fd1982b85a02b3161020359b90ea07c742e3ac57 100644
--- a/scripts/Inelastic/vesuvio/commands.py
+++ b/scripts/Inelastic/vesuvio/commands.py
@@ -1,4 +1,4 @@
-#pylint: disable=too-many-arguments,invalid-name,too-many-locals,too-many-branches
+# pylint: disable=too-many-arguments,invalid-name,too-many-locals,too-many-branches
 """
 Defines functions and classes to start the processing of Vesuvio data.
 The main entry point that most users should care about is fit_tof().
@@ -56,7 +56,7 @@ def fit_tof(runs, flags, iterations=1, convergence_threshold=None):
 
     exit_iteration = 0
 
-    for iteration in range(1, iterations+1):
+    for iteration in range(1, iterations + 1):
         iteration_flags = copy.deepcopy(flags)
         iteration_flags['iteration'] = iteration
 
@@ -82,7 +82,7 @@ def fit_tof(runs, flags, iterations=1, convergence_threshold=None):
 
         last_results = results
 
-    return (last_results[0], last_results[2], last_results[3], exit_iteration)
+    return last_results[0], last_results[2], last_results[3], exit_iteration
 
 
 def fit_tof_iteration(sample_data, container_data, runs, flags):
@@ -112,10 +112,14 @@ def fit_tof_iteration(sample_data, container_data, runs, flags):
     num_spec = sample_data.getNumberHistograms()
     pre_correct_pars_workspace = None
     pars_workspace = None
+    fit_workspace = None
     max_fit_iterations = flags.get('max_fit_iterations', 5000)
 
     output_groups = []
     chi2_values = []
+    data_workspaces = []
+    result_workspaces = []
+    group_name = runs + '_result'
     for index in range(num_spec):
         if isinstance(profiles_strs, list):
             profiles = profiles_strs[index]
@@ -148,7 +152,7 @@ def fit_tof_iteration(sample_data, container_data, runs, flags):
         ms.DeleteWorkspace(corrections_fit_name)
         corrections_args['FitParameters'] = pre_correction_pars_name
 
-        # Add the mutiple scattering arguments
+        # Add the multiple scattering arguments
         corrections_args.update(flags['ms_flags'])
 
         corrected_data_name = runs + "_tof_corrected" + suffix
@@ -199,6 +203,9 @@ def fit_tof_iteration(sample_data, container_data, runs, flags):
         if pars_workspace is None:
             pars_workspace = _create_param_workspace(num_spec, mtd[pars_name])
 
+        if fit_workspace is None:
+            fit_workspace = _create_param_workspace(num_spec, mtd[linear_correction_fit_params_name])
+
         spec_num_str = str(sample_data.getSpectrum(index).getSpectrumNo())
         current_spec = 'spectrum_' + spec_num_str
 
@@ -208,34 +215,56 @@ def fit_tof_iteration(sample_data, container_data, runs, flags):
         _update_fit_params(pars_workspace, index,
                            mtd[pars_name], current_spec)
 
+        _update_fit_params(fit_workspace, index, mtd[linear_correction_fit_params_name], current_spec)
+
         ms.DeleteWorkspace(pre_correction_pars_name)
         ms.DeleteWorkspace(pars_name)
+        ms.DeleteWorkspace(linear_correction_fit_params_name)
 
         # Process spectrum group
         # Note the ordering of operations here gives the order in the WorkspaceGroup
-        group_name = runs + suffix
-        output_workspaces = [fit_ws_name, linear_correction_fit_params_name]
+        output_workspaces = []
+        data_workspaces.append(fit_ws_name)
         if flags.get('output_verbose_corrections', False):
             output_workspaces += mtd[corrections_args["CorrectionWorkspaces"]].getNames()
             output_workspaces += mtd[corrections_args["CorrectedWorkspaces"]].getNames()
             ms.UnGroupWorkspace(corrections_args["CorrectionWorkspaces"])
             ms.UnGroupWorkspace(corrections_args["CorrectedWorkspaces"])
 
-        output_groups.append(ms.GroupWorkspaces(InputWorkspaces=output_workspaces,
-                                                OutputWorkspace=group_name))
+            for workspace in output_workspaces:
+
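+                # Collect the per-spectrum correction outputs into one workspace per correction type:
+                # the first spectrum's workspace is renamed and later spectra are conjoined onto it.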
+                group_name = runs + '_iteration_' + str(flags.get('iteration', None))
+                name = group_name + '_' + workspace.split('_')[1] + '_' + workspace.split('_')[-1]
+                result_workspaces.append(name)
+                if index == 0:
+                    ms.RenameWorkspace(InputWorkspace=workspace, OutputWorkspace=name)
+                else:
+                    ms.ConjoinWorkspaces(InputWorkspace1=name, InputWorkspace2=workspace)
 
         # Output the parameter workspaces
         params_pre_corr = runs + "_params_pre_correction_iteration_" + str(flags['iteration'])
         params_name = runs + "_params_iteration_" + str(flags['iteration'])
+        fit_name = runs + "_correction_fit_scale_iteration_" + str(flags['iteration'])
         AnalysisDataService.Instance().addOrReplace(params_pre_corr, pre_correct_pars_workspace)
         AnalysisDataService.Instance().addOrReplace(params_name, pars_workspace)
+        AnalysisDataService.Instance().addOrReplace(fit_name, fit_workspace)
+
+    if result_workspaces:
+        output_groups.append(ms.GroupWorkspaces(InputWorkspaces=result_workspaces,
+                                                OutputWorkspace=group_name))
+
+    if data_workspaces:
+        output_groups.append(ms.GroupWorkspaces(InputWorkspaces=data_workspaces,
+                                                OutputWorkspace=group_name + '_data'))
+    else:
+        output_groups.append(fit_ws_name)
 
     if len(output_groups) > 1:
         result_ws = output_groups
     else:
         result_ws = output_groups[0]
 
-    return (result_ws, pre_correct_pars_workspace, pars_workspace, chi2_values)
+    return result_ws, pre_correct_pars_workspace, pars_workspace, chi2_values
 
 
 def load_and_crop_data(runs, spectra, ip_file, diff_mode='single',
@@ -292,6 +321,7 @@ def load_and_crop_data(runs, spectra, ip_file, diff_mode='single',
 
     return tof_data
 
+
 # --------------------------------------------------------------------------------
 # Private Functions
 # --------------------------------------------------------------------------------
@@ -299,7 +329,7 @@ def load_and_crop_data(runs, spectra, ip_file, diff_mode='single',
 
 def _update_masses_from_params(old_masses, param_ws):
     """
-    Update the massses flag based on the results of a fit.
+    Update the masses flag based on the results of a fit.
 
     @param old_masses The existing masses dictionary
     @param param_ws The workspace to update from
@@ -372,7 +402,7 @@ def _create_tof_workspace_suffix(runs, spectra):
 
 def _create_fit_workspace_suffix(index, tof_data, fit_mode, spectra, iteration=None):
     if fit_mode == "bank":
-        suffix = "_" + spectra + "_bank_" + str(index+1)
+        suffix = "_" + spectra + "_bank_" + str(index + 1)
     else:
         spectrum = tof_data.getSpectrum(index)
         suffix = "_spectrum_" + str(spectrum.getSpectrumNo())
@@ -426,12 +456,12 @@ def _create_background_str(background_flags):
 def _create_intensity_constraint_str(intensity_constraints):
     """
     Create a string suitable for the algorithms out of the intensity constraint flags
-    :param inten_constr_flags: A list of lists for the constraints (can be None)
+    :param intensity_constraints: A list of lists for the constraints (can be None)
     :return: A string to pass to the algorithm
     """
     if intensity_constraints:
         if not isinstance(intensity_constraints[0], list):
-            intensity_constraints = [intensity_constraints,]
+            intensity_constraints = [intensity_constraints]
         # Make each element a string and then join them together
         intensity_constraints = [str(c) for c in intensity_constraints]
         intensity_constraints_str = ";".join(intensity_constraints)
@@ -451,11 +481,11 @@ def _create_user_defined_ties_str(masses):
     for index, mass in enumerate(masses):
         if 'ties' in mass:
             ties = mass['ties'].split(',')
-            function_indentifier = 'f' + str(index) + '.'
+            function_identifier = 'f' + str(index) + '.'
             for t in ties:
-                tie_str = function_indentifier + t
+                tie_str = function_identifier + t
                 equal_pos = tie_str.index('=') + 1
-                tie_str = tie_str[:equal_pos] + function_indentifier + tie_str[equal_pos:]
+                tie_str = tie_str[:equal_pos] + function_identifier + tie_str[equal_pos:]
                 user_defined_ties.append(tie_str)
     user_defined_ties = ','.join(user_defined_ties)
     return user_defined_ties
diff --git a/scripts/SANS/ISISCommandInterface.py b/scripts/SANS/ISISCommandInterface.py
index d49aac99a9f589efdd6058f3d3903d31040b1c7e..1980786e8fe636a271faf18e7fdc73bd762be275 100644
--- a/scripts/SANS/ISISCommandInterface.py
+++ b/scripts/SANS/ISISCommandInterface.py
@@ -1767,14 +1767,13 @@ def is_current_workspace_an_angle_workspace():
     return is_angle
 
 
-def MatchIDFInReducerAndWorkspace(file_name):
-    '''
-    This method checks if the IDF which gets loaded with the workspace associated
-    with the file name and the current instrument in the reducer singleton refer
-    to the same IDF. If not then switch the IDF in the reducer.
-    '''
-    is_matched = True
+def _get_idf_path_for_run(file_name):
+    """
+    This method finds the full path to the IDF associated with a run number or file name
 
+    :param file_name: the file name or run number
+    :return: the full path to the corresponding IDF
+    """
     # Get measurement time from file
     measurement_time = su.get_measurement_time_from_file(file_name)
 
@@ -1783,16 +1782,30 @@ def MatchIDFInReducerAndWorkspace(file_name):
 
     # Get the path to the instrument definition file
     idf_path_workspace = ExperimentInfo.getInstrumentFilename(instrument_name, measurement_time)
-    idf_path_workspace = os.path.normpath(idf_path_workspace)
+    return os.path.normpath(idf_path_workspace)
+
+
+def get_idf_path_for_run(file_name):
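+    # Thin public wrapper around _get_idf_path_for_run which also prints the resolved IDF path.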
+    idf_path_workspace = _get_idf_path_for_run(file_name)
+    print(idf_path_workspace)
+    return idf_path_workspace
+
+
+def MatchIDFInReducerAndWorkspace(file_name):
+    '''
+    This method checks if the IDF which gets loaded with the workspace associated
+    with the file name and the current instrument in the reducer singleton refer
+    to the same IDF. If not then switch the IDF in the reducer.
+    '''
+
+    # Get the IDF path
+    idf_path_workspace = _get_idf_path_for_run(file_name)
 
     # Get the idf from the reducer
     idf_path_reducer = get_current_idf_path_in_reducer()
 
-    if ((idf_path_reducer == idf_path_workspace) and
-            su.are_two_files_identical(idf_path_reducer, idf_path_reducer)):
-        is_matched = True
-    else:
-        is_matched = False
+    is_matched = ((idf_path_reducer == idf_path_workspace) and
+                  su.are_two_files_identical(idf_path_reducer, idf_path_reducer))
 
     return is_matched
 
diff --git a/scripts/SANS/SANSBatchMode.py b/scripts/SANS/SANSBatchMode.py
index ab0ab26821c34ea2a47016c5a47f1823df563aa7..533a1a271fa929fe2041c94610ca1400129691ed 100644
--- a/scripts/SANS/SANSBatchMode.py
+++ b/scripts/SANS/SANSBatchMode.py
@@ -226,6 +226,10 @@ def BatchReduce(filename, format, plotresults=False, saveAlgs={'SaveRKH':'txt'},
     original_user_file = ReductionSingleton().user_settings.filename
     current_user_file = original_user_file
 
+    # Store the original combineDet which was set by the input. This should be used whenever we are using the
+    # original user file.
+    original_combine_det = combineDet
+
     # Now loop over all the lines and do a reduction (hopefully) for each
     for run in runinfo:
         # Set the user file, if it is required
@@ -235,12 +239,16 @@ def BatchReduce(filename, format, plotresults=False, saveAlgs={'SaveRKH':'txt'},
                                                        original_user_file=original_user_file,
                                                        original_settings = settings,
                                                        original_prop_man_settings = prop_man_settings)
-            # When we set a new user file, that means that the combineDet feature could be invalid,
-            # ie if the detector under investigation changed in the user file. We need to change this
-            # here too. But only if it is not None.
-            if combineDet is not None:
-                new_combineDet = ReductionSingleton().instrument.get_detector_selection()
-                combineDet = su.get_correct_combinDet_setting(ins_name, new_combineDet)
+
+            if current_user_file == original_user_file:
+                combineDet = original_combine_det
+            else:
+                # When we set a new user file, that means that the combineDet feature could be invalid,
+                # ie if the detector under investigation changed in the user file. We need to change this
+                # here too. But only if it is not None.
+                if combineDet is not None:
+                    new_combineDet = ReductionSingleton().instrument.get_detector_selection()
+                    combineDet = su.get_correct_combinDet_setting(ins_name, new_combineDet)
         except (RuntimeError, ValueError) as e:
             sanslog.warning("Error in Batchmode user files: Could not reset the specified user file %s. More info: %s" %(
                 str(run['user_file']), str(e)))
diff --git a/scripts/SANS/SANSUtility.py b/scripts/SANS/SANSUtility.py
index 784a9277e78b23ff7c9b2bb2f2bc3058554eaacc..14c223bfdda4d35ebeabe4e22dbafe5e4d130cb5 100644
--- a/scripts/SANS/SANSUtility.py
+++ b/scripts/SANS/SANSUtility.py
@@ -1997,7 +1997,7 @@ def get_correct_combinDet_setting(instrument_name, detector_selection):
     detector_selection = detector_selection.upper()
     # If we are dealing with LOQ, then the correct combineDet selection is
     if instrument_name == "LOQ":
-        if detector_selection == "MAIN":
+        if detector_selection == "MAIN" or detector_selection == "MAIN-DETECTOR-BANK":
             new_combine_detector_selection = 'rear'
         elif detector_selection == "HAB":
             new_combine_detector_selection = 'front'
@@ -2012,9 +2012,9 @@ def get_correct_combinDet_setting(instrument_name, detector_selection):
 
     # If we are dealing with SANS2D, then the correct combineDet selection is
     if instrument_name == "SANS2D":
-        if detector_selection == "REAR":
+        if detector_selection == "REAR" or detector_selection == "REAR-DETECTOR":
             new_combine_detector_selection = 'rear'
-        elif detector_selection == "FRONT":
+        elif detector_selection == "FRONT" or detector_selection == "FRONT-DETECTOR":
             new_combine_detector_selection = 'front'
         elif detector_selection == "MERGED":
             new_combine_detector_selection = 'merged'
diff --git a/scripts/SANS/sans/algorithm_detail/batch_execution.py b/scripts/SANS/sans/algorithm_detail/batch_execution.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1d432c8d0516cbf2cffd879c227c1b691b3626b
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/batch_execution.py
@@ -0,0 +1,957 @@
+from __future__ import (absolute_import, division, print_function)
+from copy import deepcopy
+from mantid.api import AnalysisDataService
+
+from sans.common.general_functions import (create_managed_non_child_algorithm, create_unmanaged_algorithm,
+                                           get_output_name, get_base_name_from_multi_period_name)
+from sans.common.enums import (SANSDataType, SaveType, OutputMode, ISISReductionMode)
+from sans.common.constants import (TRANS_SUFFIX, SANS_SUFFIX, ALL_PERIODS,
+                                   LAB_CAN_SUFFIX, LAB_CAN_COUNT_SUFFIX, LAB_CAN_NORM_SUFFIX,
+                                   HAB_CAN_SUFFIX, HAB_CAN_COUNT_SUFFIX, HAB_CAN_NORM_SUFFIX,
+                                   REDUCED_HAB_AND_LAB_WORKSPACE_FOR_MERGED_REDUCTION,
+                                   REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+from sans.common.file_information import (get_extension_for_file_type, SANSFileInformationFactory)
+from sans.state.data import StateData
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Functions for the execution of a single batch iteration
+# ----------------------------------------------------------------------------------------------------------------------
+def single_reduction_for_batch(state, use_optimizations, output_mode):
+    """
+    Runs a single reduction.
+
+    This function creates reduction packages which essentially contain the information for a single valid reduction,
+    runs them and stores the results according to the user-specified setting (output_mode). Although this is considered
+    a single reduction it can still contain several reductions, since the SANSState object can at this point contain
+    slice settings which require one reduction per time slice.
+    :param state: a SANSState object
+    :param use_optimizations: if true then the optimizations of child algorithms are enabled.
+    :param output_mode: the output mode
+    """
+    # ------------------------------------------------------------------------------------------------------------------
+    # Load the data
+    # ------------------------------------------------------------------------------------------------------------------
+    workspace_to_name = {SANSDataType.SampleScatter: "SampleScatterWorkspace",
+                         SANSDataType.SampleTransmission: "SampleTransmissionWorkspace",
+                         SANSDataType.SampleDirect: "SampleDirectWorkspace",
+                         SANSDataType.CanScatter: "CanScatterWorkspace",
+                         SANSDataType.CanTransmission: "CanTransmissionWorkspace",
+                         SANSDataType.CanDirect: "CanDirectWorkspace"}
+
+    workspace_to_monitor = {SANSDataType.SampleScatter: "SampleScatterMonitorWorkspace",
+                            SANSDataType.CanScatter: "CanScatterMonitorWorkspace"}
+
+    workspaces, monitors = provide_loaded_data(state, use_optimizations, workspace_to_name, workspace_to_monitor)
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # Get reduction settings
+    # Split into individual bundles which can be reduced individually. We split here if we have multiple periods or
+    # sliced times for example.
+    # ------------------------------------------------------------------------------------------------------------------
+    reduction_packages = get_reduction_packages(state, workspaces, monitors)
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # Run reductions (one at a time)
+    # ------------------------------------------------------------------------------------------------------------------
+    single_reduction_name = "SANSSingleReduction"
+    single_reduction_options = {"UseOptimizations": use_optimizations}
+    reduction_alg = create_managed_non_child_algorithm(single_reduction_name, **single_reduction_options)
+    reduction_alg.setChild(False)
+    # Perform the data reduction
+    for reduction_package in reduction_packages:
+        # -----------------------------------
+        # Set the properties on the algorithm
+        # -----------------------------------
+        set_properties_for_reduction_algorithm(reduction_alg, reduction_package,
+                                               workspace_to_name, workspace_to_monitor)
+
+        # -----------------------------------
+        #  Run the reduction
+        # -----------------------------------
+        reduction_alg.execute()
+
+        # -----------------------------------
+        # Get the output of the algorithm
+        # -----------------------------------
+        reduction_package.reduced_lab = get_workspace_from_algorithm(reduction_alg, "OutputWorkspaceLAB")
+        reduction_package.reduced_hab = get_workspace_from_algorithm(reduction_alg, "OutputWorkspaceHAB")
+        reduction_package.reduced_merged = get_workspace_from_algorithm(reduction_alg, "OutputWorkspaceMerged")
+
+        reduction_package.reduced_lab_can = get_workspace_from_algorithm(reduction_alg, "OutputWorkspaceLABCan")
+        reduction_package.reduced_lab_can_count = get_workspace_from_algorithm(reduction_alg,
+                                                                               "OutputWorkspaceLABCanCount")
+        reduction_package.reduced_lab_can_norm = get_workspace_from_algorithm(reduction_alg,
+                                                                              "OutputWorkspaceLABCanNorm")
+        reduction_package.reduced_hab_can = get_workspace_from_algorithm(reduction_alg, "OutputWorkspaceHABCan")
+        reduction_package.reduced_hab_can_count = get_workspace_from_algorithm(reduction_alg,
+                                                                               "OutputWorkspaceHABCanCount")
+        reduction_package.reduced_hab_can_norm = get_workspace_from_algorithm(reduction_alg,
+                                                                              "OutputWorkspaceHABCanNorm")
+
+        # -----------------------------------
+        # The workspaces are already on the ADS, but should potentially be grouped
+        # -----------------------------------
+        group_workspaces_if_required(reduction_package)
+
+    # --------------------------------
+    # Perform output of all workspaces
+    # --------------------------------
+    # We have three options here
+    # 1. PublishToADS:
+    #    * This means we can leave it as it is
+    # 2. SaveToFile:
+    #    * This means we need to save out the reduced data
+    #    * Then we need to delete the reduced data from the ADS
+    # 3. Both:
+    #    * This means that we need to save out the reduced data
+    #    * The data is already on the ADS, so do nothing
+
+    if output_mode is OutputMode.SaveToFile:
+        save_to_file(reduction_packages)
+        delete_reduced_workspaces(reduction_packages)
+    elif output_mode is OutputMode.Both:
+        save_to_file(reduction_packages)
+
+    # -----------------------------------------------------------------------
+    # Clean up other workspaces if the optimizations have not been turned on.
+    # -----------------------------------------------------------------------
+    if not use_optimizations:
+        delete_optimization_workspaces(reduction_packages)
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Functions for Data Loading
+# ----------------------------------------------------------------------------------------------------------------------
+def get_expected_workspace_names(file_information, is_transmission, period, get_base_name_only=False):
+    """
+    Creates the expected names for SANS workspaces.
+
+    SANS scientists expect the loaded workspaces to have certain typical names. For example, the file SANS2D00022024.nxs
+    which is used as a transmission workspace translates into 22024_trans_nxs.
+    :param file_information: a file information object
+    :param is_transmission: if the file information is for a transmission or not
+    :param period: the period of interest
+    :param get_base_name_only: if we only want the base name and not the name with the period information
+    :return: a list of workspace names
+    """
+    suffix_file_type = get_extension_for_file_type(file_information)
+    if is_transmission:
+        suffix_data = TRANS_SUFFIX
+    else:
+        suffix_data = SANS_SUFFIX
+
+    run_number = file_information.get_run_number()
+
+    # Three possibilities:
+    #  1. No period data => 22024_sans_nxs
+    #  2. Period data, but wants all => 22025p1_sans_nxs,  22025p2_sans_nxs, ...
+    #  3. Period data, select particular period => 22025p3_sans_nxs
+    if file_information.get_number_of_periods() == 1:
+        workspace_name = "{0}_{1}_{2}".format(run_number, suffix_data, suffix_file_type)
+        names = [workspace_name]
+    elif file_information.get_number_of_periods() > 1 and period is StateData.ALL_PERIODS:
+        workspace_names = []
+        if get_base_name_only:
+            workspace_names.append("{0}_{1}_{2}".format(run_number, suffix_data, suffix_file_type))
+        else:
+            for period in range(1, file_information.get_number_of_periods() + 1):
+                workspace_names.append("{0}p{1}_{2}_{3}".format(run_number, period, suffix_data, suffix_file_type))
+        names = workspace_names
+    elif file_information.get_number_of_periods() > 1 and period is not StateData.ALL_PERIODS:
+        workspace_name = "{0}p{1}_{2}_{3}".format(run_number, period, suffix_data, suffix_file_type)
+        names = [workspace_name]
+    else:
+        raise RuntimeError("SANSLoad: Cannot create workspace names.")
+    return names
+
+
+def set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options, load_workspace_name, file_name, period,
+                                                                  is_transmission, file_info_factory,
+                                                                  load_monitor_name=None):
+    file_info = file_info_factory.create_sans_file_information(file_name)
+    workspace_names = get_expected_workspace_names(file_info, is_transmission=is_transmission, period=period,
+                                                   get_base_name_only=True)
+    count = 0
+    # Now we set the load options. If we are dealing with multi-period data, then we need to set a numbered
+    # output property (and monitor property, where requested) for each additional period.
+    for workspace_name in workspace_names:
+        if count == 0:
+            load_options.update({load_workspace_name: workspace_name})
+            if load_monitor_name is not None:
+                monitor_name = workspace_name + "_monitors"
+                load_options.update({load_monitor_name: monitor_name})
+        else:
+            load_workspace_name_for_period = load_workspace_name + "_" + str(count)
+            load_options.update({load_workspace_name_for_period: workspace_name})
+            if load_monitor_name is not None:
+                load_monitor_name_for_period = load_monitor_name + "_" + str(count)
+                monitor_name = workspace_name + "_monitors"
+                load_options.update({load_monitor_name_for_period: monitor_name})
+        count += 1
+
+
+def set_output_workspaces_on_load_algorithm(load_options, state):
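+    # Set an output workspace name (and, for scatter data, a monitor name) on the load algorithm for
+    # every data type present in the state: sample/can scatter, transmission and direct.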
+    data = state.data
+    file_information_factory = SANSFileInformationFactory()
+
+    # SampleScatter and SampleScatterMonitor
+    set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                  load_workspace_name="SampleScatterWorkspace",
+                                                                  file_name=data.sample_scatter,
+                                                                  period=data.sample_scatter_period,
+                                                                  is_transmission=False,
+                                                                  file_info_factory=file_information_factory,
+                                                                  load_monitor_name="SampleScatterMonitorWorkspace")
+
+    # SampleTransmission
+    sample_transmission = data.sample_transmission
+    if sample_transmission:
+        set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                      load_workspace_name="SampleTransmissionWorkspace",
+                                                                      file_name=sample_transmission,
+                                                                      period=data.sample_transmission_period,
+                                                                      is_transmission=True,
+                                                                      file_info_factory=file_information_factory)
+    # SampleDirect
+    sample_direct = data.sample_direct
+    if sample_direct:
+        set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                      load_workspace_name="SampleDirectWorkspace",
+                                                                      file_name=sample_direct,
+                                                                      period=data.sample_direct_period,
+                                                                      is_transmission=True,
+                                                                      file_info_factory=file_information_factory)
+
+    # CanScatter + CanMonitor
+    can_scatter = data.can_scatter
+    if can_scatter:
+        set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                      load_workspace_name="CanScatterWorkspace",
+                                                                      file_name=can_scatter,
+                                                                      period=data.can_scatter_period,
+                                                                      is_transmission=False,
+                                                                      file_info_factory=file_information_factory,
+                                                                      load_monitor_name="CanScatterMonitorWorkspace")
+
+    # CanTransmission
+    can_transmission = data.can_transmission
+    if can_transmission:
+        set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                      load_workspace_name="CanTransmissionWorkspace",
+                                                                      file_name=can_transmission,
+                                                                      period=data.can_transmission_period,
+                                                                      is_transmission=True,
+                                                                      file_info_factory=file_information_factory)
+    # CanDirect
+    can_direct = data.can_direct
+    if can_direct:
+        set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                      load_workspace_name="CanDirectWorkspace",
+                                                                      file_name=can_direct,
+                                                                      period=data.can_direct_period,
+                                                                      is_transmission=True,
+                                                                      file_info_factory=file_information_factory)
+
+
+def provide_loaded_data(state, use_optimizations, workspace_to_name, workspace_to_monitor):
+    """
+    Provide the data for reduction.
+
+
+    :param state: a SANSState object.
+    :param use_optimizations: if optimizations are enabled, then the load mechanism will search for workspaces on the
+                              ADS.
+    :param workspace_to_name: a map of SANSDataType vs output-property name of SANSLoad for workspaces
+    :param workspace_to_monitor: a map of SANSDataType vs output-property name of SANSLoad for monitor workspaces
+    :return: a map of workspaces and a map of monitor workspaces (SANSDataType vs list of workspaces)
+    """
+    # Load the data
+    state_serialized = state.property_manager
+    load_name = "SANSLoad"
+    load_options = {"SANSState": state_serialized,
+                    "PublishToCache": use_optimizations,
+                    "UseCached": use_optimizations,
+                    "MoveWorkspace": False}
+
+    # Set the output workspaces
+    set_output_workspaces_on_load_algorithm(load_options, state)
+
+    load_alg = create_managed_non_child_algorithm(load_name, **load_options)
+    load_alg.execute()
+
+    # Retrieve the data
+    workspace_to_count = {SANSDataType.SampleScatter: "NumberOfSampleScatterWorkspaces",
+                          SANSDataType.SampleTransmission: "NumberOfSampleTransmissionWorkspaces",
+                          SANSDataType.SampleDirect: "NumberOfSampleDirectWorkspaces",
+                          SANSDataType.CanScatter: "NumberOfCanScatterWorkspaces",
+                          SANSDataType.CanTransmission: "NumberOfCanTransmissionWorkspaces",
+                          SANSDataType.CanDirect: "NumberOfCanDirectWorkspaces"}
+
+    workspaces = get_workspaces_from_load_algorithm(load_alg, workspace_to_count, workspace_to_name)
+    monitors = get_workspaces_from_load_algorithm(load_alg, workspace_to_count, workspace_to_monitor)
+    return workspaces, monitors
+
+
+def add_loaded_workspace_to_ads(load_alg, workspace_property_name, workspace):
+    """
+    Adds a workspace to the ADS under the name that was set on the corresponding output property of the load algorithm.
+
+
+    :param load_alg: a handle to the load algorithm
+    :param workspace_property_name: the workspace property name
+    :param workspace: the workspace
+    """
+    workspace_name = load_alg.getProperty(workspace_property_name).valueAsStr
+    AnalysisDataService.addOrReplace(workspace_name, workspace)
+
+
+def get_workspaces_from_load_algorithm(load_alg, workspace_to_count, workspace_name_dict):
+    """
+    Reads the workspaces from SANSLoad
+
+    :param load_alg: a handle to the load algorithm
+    :param workspace_to_count: a map from SANSDataType to the output-number property name of SANSLoad for workspaces
+    :param workspace_name_dict: a map of SANSDataType vs output-property name of SANSLoad for (monitor) workspaces
+    :return: a map of SANSDataType vs list of workspaces (to handle multi-period data)
+    """
+    workspace_output = {}
+    for workspace_type, workspace_name in list(workspace_name_dict.items()):
+        count_id = workspace_to_count[workspace_type]
+        number_of_workspaces = load_alg.getProperty(count_id).value
+        workspaces = []
+        if number_of_workspaces > 1:
+            workspaces = get_multi_period_workspaces(load_alg, workspace_name_dict[workspace_type],
+                                                     number_of_workspaces)
+        else:
+            workspace_id = workspace_name_dict[workspace_type]
+            workspace = get_workspace_from_algorithm(load_alg, workspace_id)
+            if workspace is not None:
+                workspaces.append(workspace)
+        # Add the workspaces to the output
+        workspace_output.update({workspace_type: workspaces})
+    return workspace_output
+
+
+def get_multi_period_workspaces(load_alg, workspace_name, number_of_workspaces):
+    # Create an output name for each workspace and retrieve it from the load algorithm
+    workspaces = []
+    workspace_names = []
+    for index in range(1, number_of_workspaces + 1):
+        output_property_name = workspace_name + "_" + str(index)
+        output_workspace_name = load_alg.getProperty(output_property_name).valueAsStr
+        workspace_names.append(output_workspace_name)
+        workspace = get_workspace_from_algorithm(load_alg, output_property_name)
+        workspaces.append(workspace)
+
+    # Group the workspaces
+    base_name = get_base_name_from_multi_period_name(workspace_names[0])
+    group_name = "GroupWorkspaces"
+    group_options = {"InputWorkspaces": workspace_names,
+                     "OutputWorkspace": base_name}
+    group_alg = create_unmanaged_algorithm(group_name, **group_options)
+    group_alg.setChild(False)
+    group_alg.execute()
+    return workspaces
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Functions for reduction packages
+# ----------------------------------------------------------------------------------------------------------------------
+def get_reduction_packages(state, workspaces, monitors):
+    """
+    This function creates a set of reduction packages which contain the necessary state for a single reduction
+    as well as the required workspaces.
+
+    There are several reasons why a state can (and should) be split up:
+    1. Multi-period files were loaded. This means that we need to perform one reduction per (loaded) period
+    2. Event slices were specified. This means that we need to perform one reduction per event slice.
+
+    :param state: A single state which potentially needs to be split up into several states
+    :param workspaces: The workspaces contributing to the reduction
+    :param monitors: The monitors contributing to the reduction
+    :return: A set of "Reduction packages" where each reduction package defines a single reduction.
+    """
+    # First: Split the state on a per-period basis
+    reduction_packages = create_initial_reduction_packages(state, workspaces, monitors)
+
+    # Second: Split resulting reduction packages on a per-event-slice basis
+    # Note that at this point all reduction packages will have the same state information. They only differ in the
+    # workspaces that they use.
+    if reduction_packages_require_splitting_for_event_slices(reduction_packages):
+        reduction_packages = split_reduction_packages_for_event_slice_packages(reduction_packages)
+
+    # TODO: Third: Split resulting reduction packages on a per-wave-length-range basis
+    return reduction_packages
+
+
+def reduction_packages_require_splitting_for_event_slices(reduction_packages):
+    """
+    Checks whether the reduction packages need to be split into several packages, one per event slice.
+
+    The SANSSingleReduction algorithm can handle only a single time slice. For each time slice, we require an individual
+    reduction. Hence the states have to be split up at this point if more than one event slice was requested.
+    :param reduction_packages: a list of reduction packages.
+    :return: True if the reduction packages require splitting for event slices, else False.
+    """
+    # Determine if the event slice sub-state object contains multiple event slice requests. This is given
+    # by the number of elements in start_tof
+    reduction_package = reduction_packages[0]
+    state = reduction_package.state
+    slice_event_info = state.slice
+    start_time = slice_event_info.start_time
+    if start_time is not None and len(start_time) > 1:
+        requires_split = True
+    else:
+        requires_split = False
+    return requires_split
+
+
+def split_reduction_packages_for_event_slice_packages(reduction_packages):
+    """
+    Splits a reduction package object into several reduction package objects if it contains several event slice settings
+
+    We want to split this up here since each event slice is a full reduction cycle in itself.
+    :param reduction_packages: a list of reduction packages
+    :return: a list of reduction packages where each reduction setting contains only one event slice.
+    """
+    # Since the state is the same for all reduction packages at this point we only need to create the split state once
+    # for the first package and then apply it to all the other packages. If we have 5 reduction packages and the user
+    # requests 6 event slices, then we end up with 30 reductions!
+    reduction_package = reduction_packages[0]
+    state = reduction_package.state
+    slice_event_info = state.slice
+    start_time = slice_event_info.start_time
+    end_time = slice_event_info.end_time
+
+    states = []
+    for start, end in zip(start_time, end_time):
+        state_copy = deepcopy(state)
+        slice_event_info = state_copy.slice
+        slice_event_info.start_time = [start]
+        slice_event_info.end_time = [end]
+        states.append(state_copy)
+
+    # Now that we have all the states spread them across the packages
+    reduction_packages_split = []
+    for reduction_package in reduction_packages:
+        workspaces = reduction_package.workspaces
+        monitors = reduction_package.monitors
+        is_part_of_multi_period_reduction = reduction_package.is_part_of_multi_period_reduction
+        for state in states:
+            new_state = deepcopy(state)
+            new_reduction_package = ReductionPackage(state=new_state,
+                                                     workspaces=workspaces,
+                                                     monitors=monitors,
+                                                     is_part_of_multi_period_reduction=is_part_of_multi_period_reduction,
+                                                     is_part_of_event_slice_reduction=True)
+            reduction_packages_split.append(new_reduction_package)
+    return reduction_packages_split
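+
+# Illustrative sketch (hypothetical numbers and names): with 5 initial reduction packages and 6 requested
+# event slices, the loop above produces 5 * 6 = 30 packages, each carrying a single-slice copy of the state:
+#
+#   packages = split_reduction_packages_for_event_slice_packages(initial_packages)   # len(packages) == 30
+#   packages[0].state.slice.start_time   # -> [start of the first slice]
+#   packages[0].state.slice.end_time     # -> [end of the first slice]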
+
+
+def create_initial_reduction_packages(state, workspaces, monitors):
+    """
+    This provides the initial split of the workspaces.
+
+    If the data stems from multi-period data, then we need to split up the workspaces. The state object is valid
+    for each one of these workspaces. Hence we need to create a deep copy of it for each reduction package.
+
+    The way multi-period files are handled across the different workspace input types is:
+    1. The sample scatter period determines all other periods, i.e. if the sample scatter workspace has only
+       one period, but the sample transmission has two, then only the first period is used.
+    2. If the sample scatter period is not available on another workspace type, then the last period on that
+       workspace type is used.
+
+    For cases where the periods of the different workspace types do not match, a message is logged.
+
+    :param state: A single state which potentially needs to be split up into several states
+    :param workspaces: The workspaces contributing to the reduction
+    :param monitors: The monitors contributing to the reduction
+    :return: A set of "Reduction packages" where each reduction package defines a single reduction.
+    """
+    # For each loaded period we create a reduction package
+    packages = []
+
+    data_info = state.data
+    sample_scatter_period = data_info.sample_scatter_period
+    requires_new_period_selection = len(workspaces[SANSDataType.SampleScatter]) > 1 \
+                                    and sample_scatter_period == ALL_PERIODS  # noqa
+
+    is_multi_period = len(workspaces[SANSDataType.SampleScatter]) > 1
+
+    for index in range(0, len(workspaces[SANSDataType.SampleScatter])):
+        workspaces_for_package = {}
+        # For each workspace type, i.e. sample scatter, can transmission, etc., find the correct workspace
+        for workspace_type, workspace_list in list(workspaces.items()):
+            workspace = get_workspace_for_index(index, workspace_list)
+            workspaces_for_package.update({workspace_type: workspace})
+
+        # For each monitor type, find the correct workspace
+        monitors_for_package = {}
+        for workspace_type, workspace_list in list(monitors.items()):
+            workspace = get_workspace_for_index(index, workspace_list)
+            monitors_for_package.update({workspace_type: workspace})
+        state_copy = deepcopy(state)
+
+        # Set the period on the state
+        if requires_new_period_selection:
+            state_copy.data.sample_scatter_period = index + 1
+        packages.append(ReductionPackage(state=state_copy,
+                                         workspaces=workspaces_for_package,
+                                         monitors=monitors_for_package,
+                                         is_part_of_multi_period_reduction=is_multi_period,
+                                         is_part_of_event_slice_reduction=False))
+    return packages
+
+
+def get_workspace_for_index(index, workspace_list):
+    """
+    Extracts the workspace from the list of workspaces. The index corresponds to the nth SampleScatter workspace.
+
+    There might be situations where there is no corresponding CanXXX workspace or SampleTransmission workspace etc.,
+    since they are optional.
+
+    :param index: The index of the workspace from which to extract.
+    :param workspace_list: A list of workspaces.
+    :return: The workspace corresponding to the index or None
+    """
+    if workspace_list:
+        if index < len(workspace_list):
+            workspace = workspace_list[index]
+        else:
+            workspace = None
+    else:
+        workspace = None
+    return workspace
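+
+# Usage sketch (hypothetical names): optional workspace lists may be shorter than the sample scatter list,
+# in which case None is returned for the missing entries:
+#
+#   transmissions = ["trans_period_1", "trans_period_2"]
+#   get_workspace_for_index(1, transmissions)   # -> "trans_period_2"
+#   get_workspace_for_index(3, transmissions)   # -> None (no transmission for this period)
+#   get_workspace_for_index(0, [])              # -> None (workspace type not provided at all)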
+
+
+def set_properties_for_reduction_algorithm(reduction_alg, reduction_package, workspace_to_name, workspace_to_monitor):
+    """
+    Sets up everything necessary on the reduction algorithm.
+
+    :param reduction_alg: a handle to the reduction algorithm
+    :param reduction_package: a reduction package object
+    :param workspace_to_name: the workspace to name map
+    :param workspace_to_monitor: a workspace to monitor map
+    """
+    def _set_output_name(_reduction_alg, _reduction_package, _is_group, _reduction_mode, _property_name,
+                         _attr_out_name, _attr_out_name_base, _suffix=None):
+        _out_name, _out_name_base = get_output_name(_reduction_package.state, _reduction_mode, _is_group)
+
+        if _suffix is not None:
+            _out_name += _suffix
+            _out_name_base += _suffix
+
+        _reduction_alg.setProperty(_property_name, _out_name)
+        setattr(_reduction_package, _attr_out_name, _out_name)
+        setattr(_reduction_package, _attr_out_name_base, _out_name_base)
+
+    def _set_lab(_reduction_alg, _reduction_package, _is_group):
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLABCan", "reduced_lab_can_name", "reduced_lab_can_base_name",
+                         LAB_CAN_SUFFIX)
+
+        # Lab Can Count workspace - this is a partial workspace
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLABCanCount", "reduced_lab_can_count_name", "reduced_lab_can_count_base_name",
+                         LAB_CAN_COUNT_SUFFIX)
+
+        # Lab Can Norm workspace - this is a partial workspace
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLABCanNorm", "reduced_lab_can_norm_name", "reduced_lab_can_norm_base_name",
+                         LAB_CAN_NORM_SUFFIX)
+
+    def _set_hab(_reduction_alg, _reduction_package, _is_group):
+        # Hab Can Workspace
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHABCan", "reduced_hab_can_name", "reduced_hab_can_base_name",
+                         HAB_CAN_SUFFIX)
+
+        # Hab Can Count workspace - this is a partial workspace
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHABCanCount", "reduced_hab_can_count_name", "reduced_hab_can_count_base_name",
+                         HAB_CAN_COUNT_SUFFIX)
+
+        # Hab Can Norm workspace - this is a partial workspace
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHABCanNorm", "reduced_hab_can_norm_name", "reduced_hab_can_norm_base_name",
+                         HAB_CAN_NORM_SUFFIX)
+
+    # Go through the elements of the reduction package and set them on the reduction algorithm
+    # Set the SANSState
+    state = reduction_package.state
+    state_dict = state.property_manager
+    reduction_alg.setProperty("SANSState", state_dict)
+
+    # Set the input workspaces
+    workspaces = reduction_package.workspaces
+    for workspace_type, workspace in list(workspaces.items()):
+        if workspace is not None:
+            reduction_alg.setProperty(workspace_to_name[workspace_type], workspace)
+
+    # Set the monitors
+    monitors = reduction_package.monitors
+    for workspace_type, monitor in list(monitors.items()):
+        if monitor is not None:
+            reduction_alg.setProperty(workspace_to_monitor[workspace_type], monitor)
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # Set the output workspaces for LAB, HAB and Merged
+    # ------------------------------------------------------------------------------------------------------------------
+    is_part_of_multi_period_reduction = reduction_package.is_part_of_multi_period_reduction
+    is_part_of_event_slice_reduction = reduction_package.is_part_of_event_slice_reduction
+    is_group = is_part_of_multi_period_reduction or is_part_of_event_slice_reduction
+
+    reduction_mode = reduction_package.reduction_mode
+    if reduction_mode is ISISReductionMode.Merged:
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.Merged,
+                         "OutputWorkspaceMerged", "reduced_merged_name", "reduced_merged_base_name")
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLAB", "reduced_lab_name", "reduced_lab_base_name", "_lab")
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHAB", "reduced_hab_name", "reduced_hab_base_name", "_hab")
+    elif reduction_mode is ISISReductionMode.LAB:
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLAB", "reduced_lab_name", "reduced_lab_base_name")
+    elif reduction_mode is ISISReductionMode.HAB:
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHAB", "reduced_hab_name", "reduced_hab_base_name")
+    elif reduction_mode is ISISReductionMode.Both:
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLAB", "reduced_lab_name", "reduced_lab_base_name")
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHAB", "reduced_hab_name", "reduced_hab_base_name")
+    else:
+        raise RuntimeError("The reduction mode {0} is not known".format(reduction_mode))
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # Set the output workspaces for the can reduction and the partial can reductions
+    # ------------------------------------------------------------------------------------------------------------------
+    # Set the output workspaces for the can reductions -- note that these will only be set if optimizations
+    # are enabled
+    # Lab Can Workspace
+    if reduction_mode is ISISReductionMode.Merged:
+        _set_lab(reduction_alg, reduction_package, is_group)
+        _set_hab(reduction_alg, reduction_package, is_group)
+    elif reduction_mode is ISISReductionMode.LAB:
+        _set_lab(reduction_alg, reduction_package, is_group)
+    elif reduction_mode is ISISReductionMode.HAB:
+        _set_hab(reduction_alg, reduction_package, is_group)
+    elif reduction_mode is ISISReductionMode.Both:
+        _set_lab(reduction_alg, reduction_package, is_group)
+        _set_hab(reduction_alg, reduction_package, is_group)
+    else:
+        raise RuntimeError("The reduction mode {0} is not known".format(reduction_mode))
+
+
+def get_workspace_from_algorithm(alg, output_property_name):
+    """
+    Gets the output workspace from an algorithm. Since we don't run this as a child we need to get it from the
+    ADS.
+
+    :param alg: a handle to the algorithm from which we want to take the output workspace property.
+    :param output_property_name: the name of the output property.
+    :return: the workspace or None
+    """
+    output_workspace_name = alg.getProperty(output_property_name).valueAsStr
+
+    if not output_workspace_name:
+        return None
+
+    if AnalysisDataService.doesExist(output_workspace_name):
+        return AnalysisDataService.retrieve(output_workspace_name)
+    else:
+        return None
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Functions for outputs to the ADS and saving the file
+# ----------------------------------------------------------------------------------------------------------------------
+def group_workspaces_if_required(reduction_package):
+    """
+    The output workspaces have already been published to the ADS by the algorithm. Now we might have to
+    bundle them into a group if:
+    * They are part of a multi-period workspace or a sliced reduction
+    * They are reduced LAB and HAB workspaces of a Merged reduction
+    * They are can workspaces - they are all grouped into a single group
+    :param reduction_package: a reduction package
+    """
+    is_part_of_multi_period_reduction = reduction_package.is_part_of_multi_period_reduction
+    is_part_of_event_slice_reduction = reduction_package.is_part_of_event_slice_reduction
+    requires_grouping = is_part_of_multi_period_reduction or is_part_of_event_slice_reduction
+
+    reduced_lab = reduction_package.reduced_lab
+    reduced_hab = reduction_package.reduced_hab
+    reduced_merged = reduction_package.reduced_merged
+
+    is_merged_reduction = reduced_merged is not None
+
+    # Add the reduced workspaces to groups if they require this
+    if is_merged_reduction:
+        if requires_grouping:
+            add_to_group(reduced_merged, reduction_package.reduced_merged_base_name)
+            add_to_group(reduced_lab, REDUCED_HAB_AND_LAB_WORKSPACE_FOR_MERGED_REDUCTION)
+            add_to_group(reduced_hab, REDUCED_HAB_AND_LAB_WORKSPACE_FOR_MERGED_REDUCTION)
+        else:
+            add_to_group(reduced_lab, REDUCED_HAB_AND_LAB_WORKSPACE_FOR_MERGED_REDUCTION)
+            add_to_group(reduced_hab, REDUCED_HAB_AND_LAB_WORKSPACE_FOR_MERGED_REDUCTION)
+    else:
+        if requires_grouping:
+            add_to_group(reduced_lab, reduction_package.reduced_lab_base_name)
+            add_to_group(reduced_hab, reduction_package.reduced_hab_base_name)
+
+    # Add the can workspaces (used for optimizations) to a Workspace Group (if they exist)
+    add_to_group(reduction_package.reduced_lab_can, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+    add_to_group(reduction_package.reduced_lab_can_count, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+    add_to_group(reduction_package.reduced_lab_can_norm, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+
+    add_to_group(reduction_package.reduced_hab_can, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+    add_to_group(reduction_package.reduced_hab_can_count, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+    add_to_group(reduction_package.reduced_hab_can_norm, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+
+
+def add_to_group(workspace, name_of_group_workspace):
+    """
+    Adds the workspace to a group workspace, creating the group with the given name if it does not exist yet.
+
+    :param workspace: the workspace to add to the WorkspaceGroup
+    :param name_of_group_workspace: the name of the WorkspaceGroup
+    """
+    if workspace is None:
+        return
+    name_of_workspace = workspace.name()
+    if AnalysisDataService.doesExist(name_of_group_workspace):
+        group_workspace = AnalysisDataService.retrieve(name_of_group_workspace)
+        group_workspace.add(name_of_workspace)
+    else:
+        group_name = "GroupWorkspaces"
+        group_options = {"InputWorkspaces": [name_of_workspace],
+                         "OutputWorkspace": name_of_group_workspace}
+        group_alg = create_unmanaged_algorithm(group_name, **group_options)
+        # At this point we are dealing with the ADS, hence we need to make sure that this is not called as
+        # a child algorithm
+        group_alg.setChild(False)
+        group_alg.execute()
+
+
+def save_to_file(reduction_packages):
+    """
+    Extracts all workspace names which need to be saved and saves them into a file.
+
+    :param reduction_packages: a list of reduction packages which contain all the relevant information for saving
+    """
+    workspaces_names_to_save = get_all_names_to_save(reduction_packages)
+
+    state = reduction_packages[0].state
+    save_info = state.save
+    file_formats = save_info.file_format
+    for name_to_save in workspaces_names_to_save:
+        save_workspace_to_file(name_to_save, file_formats)
+
+
+def delete_reduced_workspaces(reduction_packages):
+    """
+    Deletes all workspaces which would have been generated from a list of reduction packages.
+
+    :param reduction_packages: a list of reduction packages
+    """
+    def _delete_workspaces(_delete_alg, _workspaces):
+        for _workspace in _workspaces:
+            if _workspace is not None:
+                _delete_alg.setProperty("Workspace", _workspace.name())
+                _delete_alg.execute()
+    # Get all names which were saved out to workspaces
+    # Delete each workspace
+    delete_name = "DeleteWorkspace"
+    delete_options = {}
+    delete_alg = create_unmanaged_algorithm(delete_name, **delete_options)
+
+    for reduction_package in reduction_packages:
+        reduced_lab = reduction_package.reduced_lab
+        reduced_hab = reduction_package.reduced_hab
+        reduced_merged = reduction_package.reduced_merged
+        _delete_workspaces(delete_alg, [reduced_lab, reduced_hab, reduced_merged])
+
+
+def delete_optimization_workspaces(reduction_packages):
+    """
+    Deletes all workspaces which are used for optimizations. These can be loaded workspaces or can optimizations.
+
+    :param reduction_packages: a list of reduction packages.
+    """
+    def _delete_workspaces(_delete_alg, _workspaces):
+        _workspace_names_to_delete = set([_workspace.name() for _workspace in _workspaces if _workspace is not None])
+        for _workspace_name_to_delete in _workspace_names_to_delete:
+            if _workspace_name_to_delete:
+                _delete_alg.setProperty("Workspace", _workspace_name_to_delete)
+                _delete_alg.execute()
+    delete_name = "DeleteWorkspace"
+    delete_options = {}
+    delete_alg = create_unmanaged_algorithm(delete_name, **delete_options)
+
+    for reduction_package in reduction_packages:
+        # Delete loaded workspaces
+        workspaces_to_delete = list(reduction_package.workspaces.values())
+        _delete_workspaces(delete_alg, workspaces_to_delete)
+
+        # Delete loaded monitors
+        monitors_to_delete = list(reduction_package.monitors.values())
+        _delete_workspaces(delete_alg, monitors_to_delete)
+
+        # Delete can optimizations
+        optimizations_to_delete = [reduction_package.reduced_lab_can,
+                                   reduction_package.reduced_lab_can_count,
+                                   reduction_package.reduced_lab_can_norm,
+                                   reduction_package.reduced_hab_can,
+                                   reduction_package.reduced_hab_can_count,
+                                   reduction_package.reduced_hab_can_norm]
+        _delete_workspaces(delete_alg, optimizations_to_delete)
+
+
+def get_all_names_to_save(reduction_packages):
+    """
+    Extracts all the output names from a list of reduction packages.
+
+    :param reduction_packages: a list of reduction packages
+    :return: a set of workspace names to save.
+    """
+    names_to_save = []
+    for reduction_package in reduction_packages:
+        is_part_of_multi_period_reduction = reduction_package.is_part_of_multi_period_reduction
+        is_part_of_event_slice_reduction = reduction_package.is_part_of_event_slice_reduction
+        is_group = is_part_of_multi_period_reduction or is_part_of_event_slice_reduction
+
+        reduced_lab = reduction_package.reduced_lab
+        reduced_hab = reduction_package.reduced_hab
+        reduced_merged = reduction_package.reduced_merged
+
+        # If we have a merged reduction then store the merged name, else the individual LAB and HAB names
+        if reduced_merged:
+            if is_group:
+                names_to_save.append(reduction_package.reduced_merged_base_name)
+            else:
+                names_to_save.append(reduced_merged.name())
+        else:
+            if reduced_lab:
+                if is_group:
+                    names_to_save.append(reduction_package.reduced_lab_base_name)
+                else:
+                    names_to_save.append(reduced_lab.name())
+
+            if reduced_hab:
+                if is_group:
+                    names_to_save.append(reduction_package.reduced_hab_base_name)
+                else:
+                    names_to_save.append(reduced_hab.name())
+
+    # We might have some workspaces as duplicates (the group workspaces), so make them unique
+    return set(names_to_save)
+
+
+def save_workspace_to_file(output_name, file_formats):
+    """
+    Saves the workspace to the different file formats specified in the state object.
+
+    :param output_name: the name of the output workspace and also the name of the file
+    :param file_formats: a list of file formats to save
+    """
+    save_name = "SANSSave"
+    save_options = {"InputWorkspace": output_name}
+    save_options.update({"Filename": output_name})
+
+    if SaveType.Nexus in file_formats:
+        save_options.update({"Nexus": True})
+    if SaveType.CanSAS in file_formats:
+        save_options.update({"CanSAS": True})
+    if SaveType.NXcanSAS in file_formats:
+        save_options.update({"NXcanSAS": True})
+    if SaveType.NistQxy in file_formats:
+        save_options.update({"NistQxy": True})
+    if SaveType.RKH in file_formats:
+        save_options.update({"RKH": True})
+    if SaveType.CSV in file_formats:
+        save_options.update({"CSV": True})
+
+    save_alg = create_unmanaged_algorithm(save_name, **save_options)
+    save_alg.execute()
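+
+# Illustrative sketch (hypothetical workspace name): for a state requesting Nexus and CanSAS output,
+# the options assembled above would look roughly like this before SANSSave is executed:
+#
+#   {"InputWorkspace": "12345_rear_1D_1.5_12.5", "Filename": "12345_rear_1D_1.5_12.5",
+#    "Nexus": True, "CanSAS": True}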
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Container classes
+# ----------------------------------------------------------------------------------------------------------------------
+class ReducedDataType(object):
+    class Merged(object):
+        pass
+
+    class LAB(object):
+        pass
+
+    class HAB(object):
+        pass
+
+
+class ReductionPackage(object):
+    """
+    The reduction package is a mutable store for
+    1. The state object which defines our reductions.
+    2. A dictionary with input_workspace_type vs input_workspace
+    3. A dictionary with input_monitor_workspace_type vs input_monitor_workspace
+    4. A flag which indicates if the reduction is part of a multi-period reduction
+    5. A flag which indicates if the reduction is part of a sliced reduction
+    6. The reduced workspaces (not all need to exist)
+    7. The reduced can and the reduced partial can workspaces (none have to exist; they are only used for optimizations)
+    """
+    def __init__(self, state, workspaces, monitors, is_part_of_multi_period_reduction=False,
+                 is_part_of_event_slice_reduction=False):
+        super(ReductionPackage, self).__init__()
+        # -------------------------------------------------------
+        # General Settings
+        # -------------------------------------------------------
+        self.state = state
+        self.workspaces = workspaces
+        self.monitors = monitors
+        self.is_part_of_multi_period_reduction = is_part_of_multi_period_reduction
+        self.is_part_of_event_slice_reduction = is_part_of_event_slice_reduction
+        self.reduction_mode = state.reduction.reduction_mode
+
+        # -------------------------------------------------------
+        # Reduced workspaces
+        # -------------------------------------------------------
+        self.reduced_lab = None
+        self.reduced_hab = None
+        self.reduced_merged = None
+
+        # -------------------------------------------------------
+        # Reduced partial can workspaces (and partial workspaces)
+        # -------------------------------------------------------
+        self.reduced_lab_can = None
+        self.reduced_lab_can_count = None
+        self.reduced_lab_can_norm = None
+
+        self.reduced_hab_can = None
+        self.reduced_hab_can_count = None
+        self.reduced_hab_can_norm = None
+
+        # -------------------------------------------------------
+        # Output names and base names
+        # -------------------------------------------------------
+        self.reduced_lab_name = None
+        self.reduced_lab_base_name = None
+        self.reduced_hab_name = None
+        self.reduced_hab_base_name = None
+        self.reduced_merged_name = None
+        self.reduced_merged_base_name = None
+
+        # Partial reduced can workspace names
+        self.reduced_lab_can_name = None
+        self.reduced_lab_can_base_name = None
+        self.reduced_lab_can_count_name = None
+        self.reduced_lab_can_count_base_name = None
+        self.reduced_lab_can_norm_name = None
+        self.reduced_lab_can_norm_base_name = None
+
+        self.reduced_hab_can_name = None
+        self.reduced_hab_can_base_name = None
+        self.reduced_hab_can_count_name = None
+        self.reduced_hab_can_count_base_name = None
+        self.reduced_hab_can_norm_name = None
+        self.reduced_hab_can_norm_base_name = None
diff --git a/scripts/SANS/sans/algorithm_detail/calculate_transmission_helper.py b/scripts/SANS/sans/algorithm_detail/calculate_transmission_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..87e68b6a232fc1686383b28c88d320bce590db84
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/calculate_transmission_helper.py
@@ -0,0 +1,277 @@
+from __future__ import (absolute_import, division, print_function)
+from mantid.api import ExperimentInfo
+from sans.common.general_functions import (create_unmanaged_algorithm, sanitise_instrument_name)
+from sans.common.constants import EMPTY_NAME
+
+
+def apply_flat_background_correction_to_detectors(workspace, flat_background_correction_start,
+                                                  flat_background_correction_stop):
+    """
+    Applies the flat background correction to all detectors which are not monitors
+
+    :param workspace: the workspace which contains detector spectra which will be corrected.
+    :param flat_background_correction_start: the start of the flat background region
+    :param flat_background_correction_stop: the end of the flat background region
+    :return: a corrected workspace
+    """
+    if flat_background_correction_start is not None and flat_background_correction_stop is not None:
+        flat_name = "CalculateFlatBackground"
+        flat_options = {"InputWorkspace": workspace,
+                        "Mode": "Mean",
+                        "StartX": flat_background_correction_start,
+                        "EndX": flat_background_correction_stop,
+                        "SkipMonitors": True}
+        flat_alg = create_unmanaged_algorithm(flat_name, **flat_options)
+        flat_alg.setPropertyValue("OutputWorkspace", EMPTY_NAME)
+        flat_alg.setProperty("OutputWorkspace", workspace)
+        flat_alg.execute()
+        workspace = flat_alg.getProperty("OutputWorkspace").value
+    return workspace
+
+
+def apply_flat_background_correction_to_monitors(workspace, monitor_indices, background_TOF_monitor_start,
+                                                 background_TOF_monitor_stop, background_TOF_general_start,
+                                                 background_TOF_general_stop):
+    """
+    Applies the flat background correction to some monitors
+
+
+    :param workspace: the workspace which contains monitor spectra which will be corrected.
+    :param monitor_indices: the workspace indices of the monitors which will be corrected.
+    :param background_TOF_monitor_start: a dictionary where the keys are spectrum numbers of monitors (as strings) and
+                                         the values are the start time of the flat background correction.
+    :param background_TOF_monitor_stop: a dictionary where the keys are spectrum numbers of monitors (as strings) and
+                                        the values are the stop time of the flat background correction.
+    :param background_TOF_general_start: the start value of the general background region. This is used if
+                                         the monitor-specific setting does not exist
+    :param background_TOF_general_stop: the stop value of the general background region. This is used if
+                                         the monitor-specific setting does not exist
+    :return: a corrected workspace.
+    """
+    for workspace_index in monitor_indices:
+        # Get the flat background region for this monitor.
+        spectrum = workspace.getSpectrum(workspace_index)
+        spectrum_number = spectrum.getSpectrumNo()
+        monitor_key = str(spectrum_number)
+        if monitor_key not in background_TOF_monitor_start and monitor_key not in background_TOF_monitor_stop \
+                and background_TOF_general_start is None and background_TOF_general_stop is None:
+            continue
+        tof_start = background_TOF_monitor_start[monitor_key] if monitor_key in background_TOF_monitor_start else \
+            background_TOF_general_start
+        tof_stop = background_TOF_monitor_stop[monitor_key] if monitor_key in background_TOF_monitor_stop else \
+            background_TOF_general_stop
+
+        flat_name = "CalculateFlatBackground"
+        flat_options = {"InputWorkspace": workspace,
+                        "Mode": "Mean",
+                        "StartX": tof_start,
+                        "EndX": tof_stop,
+                        "WorkspaceIndexList": workspace_index}
+        flat_alg = create_unmanaged_algorithm(flat_name, **flat_options)
+        flat_alg.setPropertyValue("OutputWorkspace", EMPTY_NAME)
+        flat_alg.setProperty("OutputWorkspace", workspace)
+        flat_alg.execute()
+        workspace = flat_alg.getProperty("OutputWorkspace").value
+    return workspace
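+
+# Illustrative sketch (hypothetical values): the monitor-specific background maps are keyed by the spectrum number
+# as a string, e.g.
+#
+#   background_TOF_monitor_start = {"1": 85000.0, "2": 85000.0}
+#   background_TOF_monitor_stop = {"1": 98000.0, "2": 98000.0}
+#
+# Monitors without an entry fall back to background_TOF_general_start/stop; if neither is set they are skipped.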
+
+
+def get_workspace_indices_for_monitors(workspace):
+    """
+    Creates a generator of workspace indices corresponding to spectra which are actually monitors.
+
+    :param workspace: workspace to check for monitors.
+    :return: a generator for workspace indices.
+    """
+    for index in range(workspace.getNumberHistograms()):
+        detector = workspace.getDetector(index)
+        if detector.isMonitor():
+            yield index
+
+
+def get_detector_id_for_spectrum_number(workspace, spectrum_number):
+    """
+    Gets the detector id of a spectrum for a given spectrum number.
+
+    :param workspace: the workspace with the relevant spectrum.
+    :param spectrum_number: the spectrum number.
+    :return: the corresponding detector id.
+    """
+    try:
+        workspace_index = workspace.getIndexFromSpectrumNumber(spectrum_number)
+        detector = workspace.getDetector(workspace_index)
+        detector_id = detector.getID()
+    except RuntimeError:
+        detector_id = None
+    return detector_id
+
+
+def get_idf_path_from_workspace(workspace):
+    """
+    Gets the full IDF path from a workspace.
+
+    It queries the workspace for the start time and instrument name. It gets the IDF path from the ExperimentInfo.
+    :param workspace: the workspace for which we want the full IDF path.
+    :return: the full IDF path for the instrument of the workspace.
+    """
+    run = workspace.run()
+    instrument = workspace.getInstrument()
+    instrument_name = instrument.getName()
+    instrument_name = sanitise_instrument_name(instrument_name)
+    if run.hasProperty("start_time"):
+        time = run.getProperty("start_time").value
+        idf_path = ExperimentInfo.getInstrumentFilename(instrument_name, time)
+    elif run.hasProperty("run_start"):
+        time = run.getProperty("run_start").value
+        idf_path = ExperimentInfo.getInstrumentFilename(instrument_name, time)
+    else:
+        idf_path = None
+    return idf_path
+
+
+def get_masked_det_ids_from_mask_file(mask_file_path, idf_path):
+    """
+    Given a mask file and the (necessary) path to the corresponding IDF, will
+    load in the file and return a list of detector IDs that are masked.
+
+    TODO: Investigate if there is a better way of finding the detector ids from a mask file. This is a minor
+          performance bottleneck and does not seem quite right.
+          * Check if parsing the file directly provides better performance.
+
+    :param mask_file_path: the path of the mask file to read in
+    :param idf_path: the path to the corresponding IDF. Necessary so that we
+                       know exactly which instrument to use, and therefore know
+                       the correct detector IDs.
+    :return: the list of detector IDs that were masked in the file
+    """
+    mask_name = "LoadMask"
+    mask_options = {"Instrument": idf_path,
+                    "InputFile": mask_file_path,
+                    "OutputWorkspace": EMPTY_NAME}
+    mask_alg = create_unmanaged_algorithm(mask_name, **mask_options)
+    mask_alg.execute()
+    workspace = mask_alg.getProperty("OutputWorkspace").value
+    return list(yield_masked_det_ids(workspace))
+
+
+def yield_masked_det_ids(masking_workspace):
+    """
+    For some reason Detector.isMasked() does not work for MaskingWorkspaces.
+    We use masking_ws.readY(ws_index)[0] == 1 instead.
+
+    :param masking_workspace: a mask workspace
+    :return: a generator of detector ids
+    """
+    for ws_index in range(masking_workspace.getNumberHistograms()):
+        if masking_workspace.readY(ws_index)[0] == 1:
+            yield masking_workspace.getDetector(ws_index).getID()
+
+
+def get_masked_det_ids(workspace):
+    """
+    Given a workspace, yields all the IDs that correspond to detectors
+    that have been masked.
+
+    :param workspace: the workspace to extract the det IDs from
+    :return: a generator of IDs for masked detectors
+    """
+    for ws_index in range(workspace.getNumberHistograms()):
+        try:
+            detector = workspace.getDetector(ws_index)
+        except RuntimeError:
+            # Skip the rest after finding the first spectra with no detectors,
+            # which is a big speed increase for SANS2D.
+            break
+        if detector.isMasked():
+            yield detector.getID()
+
+
+def infinite_cylinder_xml(id_name, centre, radius, axis):
+    """
+    Creates the masking XML for an infinite cylinder along the specified axis.
+    :param id_name: the id name
+    :param centre: a collection with three entries defining the centre
+    :param radius: the cylinder radius
+    :param axis: a collection with three entries defining the axis
+    :return: the infinite cylinder masking xml
+    """
+    return '<infinite-cylinder id="' + str(id_name) + '">' + \
+           '<centre x="' + str(centre[0]) + '" y="' + str(centre[1]) + '" z="' + str(centre[2]) + '" />' + \
+           '<axis x="' + str(axis[0]) + '" y="' + str(axis[1]) + '" z="' + str(axis[2]) + '" />' + \
+           '<radius val="' + str(radius) + '" />' + \
+           '</infinite-cylinder>\n'
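+
+# Illustrative sketch: infinite_cylinder_xml('shape', [0.0, 0.0, 0.0], 0.05, [0, 0, 1]) returns a single
+# line of masking XML along the lines of
+#
+#   <infinite-cylinder id="shape"><centre x="0.0" y="0.0" z="0.0" /><axis x="0" y="0" z="1" />
+#   <radius val="0.05" /></infinite-cylinder>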
+
+
+def mask_with_cylinder(workspace, radius, x_centre, y_centre, algebra):
+    """
+    Mask a cylinder on the input workspace.
+
+    :param workspace: the workspace to mask
+    :param radius: the masking radius
+    :param x_centre: the x position of the masking radius
+    :param y_centre: the y position of the masking radius
+    :param algebra: a masking algebra
+    """
+    xml_def = infinite_cylinder_xml('shape', [x_centre, y_centre, 0.0], radius, [0, 0, 1])
+    xml_def += '<algebra val="' + algebra + 'shape" />'
+
+    mask_name = "MaskDetectorsInShape"
+    mask_options = {"Workspace": workspace,
+                    "ShapeXML": xml_def}
+    mask_alg = create_unmanaged_algorithm(mask_name, **mask_options)
+    mask_alg.execute()
+    return mask_alg.getProperty("Workspace").value
+
+
+def get_region_of_interest(workspace, radius=None, roi_files=None, mask_files=None):
+    """
+    Calculate the various contributions to the "region of interest", used in the
+    transmission calculation.
+
+    The region of interest can be made up of a circle of detectors (with a given radius)
+    around the beam centre, and/or one or more mask files, and/or the main detector bank.
+    Note that the mask files won't actually be used for masking; we're just piggy-backing
+    on the functionality that they provide. Note that in the case of a radius, we have
+    to ensure that we do not use a workspace which already has masked detectors, since
+    they would contribute to the ROI.
+    :param workspace: the workspace which is used for the transmission calculation
+    :param radius: the radius of the region of interest
+    :param roi_files: a list of roi files. Spectra in the ROI contribute to the
+                      transmission calculation.
+    :param mask_files: a list of mask files. Spectra in the Mask explicitly do not
+                       contribute to the transmission calculation.
+    :return: a sorted list of detector IDs which make up the region of interest
+    """
+    trans_roi = []
+
+    if radius is not None:
+        # Mask out a cylinder with the given radius in a copy of the workspace.
+        # The centre position of the Cylinder does not require a shift, as all
+        # components have been shifted already, when the workspaces were loaded
+        clone_name = "CloneWorkspace"
+        clone_options = {"InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME}
+        clone_alg = create_unmanaged_algorithm(clone_name, **clone_options)
+        clone_alg.execute()
+        cloned_workspace = clone_alg.getProperty("OutputWorkspace").value
+
+        # Mask the cylinder around a centre of (0, 0)
+        mask_with_cylinder(cloned_workspace, radius, 0.0, 0.0, "")
+
+        # Extract the masked detector IDs.
+        trans_roi += get_masked_det_ids(cloned_workspace)
+
+    idf_path = get_idf_path_from_workspace(workspace)
+
+    if roi_files is not None and idf_path is not None:
+        for roi_file in roi_files:
+            trans_roi += get_masked_det_ids_from_mask_file(roi_file, idf_path)
+
+    masked_ids = []
+    if mask_files is not None and idf_path is not None:
+        for mask_file in mask_files:
+            masked_ids += get_masked_det_ids_from_mask_file(mask_file, idf_path)
+
+    # Detector ids which are not allowed and specified by "masked_ids" need to
+    # be removed from the trans_roi list
+    # Remove duplicates and sort.
+    return sorted(set(trans_roi) - set(masked_ids))
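+
+# Illustrative sketch (hypothetical detector IDs): the region of interest is the union of the radius and ROI-file
+# contributions with the mask-file contribution removed, e.g.
+#
+#   trans_roi = [5, 6, 7, 6]                   # from the cylinder mask and/or the ROI files (duplicates possible)
+#   masked_ids = [6]                           # from the mask files
+#   sorted(set(trans_roi) - set(masked_ids))   # -> [5, 7]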
diff --git a/scripts/SANS/sans/algorithm_detail/load_data.py b/scripts/SANS/sans/algorithm_detail/load_data.py
index 62f53e110c29973f50142bcdde2d42429b4b38e9..997ada3730da322362ef6167b1dbd7e1214f0bf5 100644
--- a/scripts/SANS/sans/algorithm_detail/load_data.py
+++ b/scripts/SANS/sans/algorithm_detail/load_data.py
@@ -154,8 +154,8 @@ def is_data_transmission_and_event_mode(file_infos):
     """
     Checks if a file is used as a transmission workspace and contains event-mode data. This is not allowed.
 
-    @param file_infos: a dict of DataType vs FileInformation objects
-    @return: True if the file setting is bad else False
+    :param file_infos: a dict of DataType vs FileInformation objects
+    :return: True if the file setting is bad else False
     """
     is_bad_file_setting = False
     for key, value in list(file_infos.items()):
@@ -235,9 +235,9 @@ def is_calibration_correct(workspace, calibration_file):
     Check if the calibration has been applied. If no calibration has been specified then none should be have
     been applied.
 
-    @param workspace: the workspace to check.
-    @param calibration_file: the path to the calibration file.
-    @return: True if the calibration file matches or if none was set and the path is empty, else False
+    :param workspace: the workspace to check.
+    :param calibration_file: the path to the calibration file.
+    :return: True if the calibration file matches or if none was set and the path is empty, else False
     """
     has_calibration = has_tag(CALIBRATION_WORKSPACE_TAG, workspace)
     return (has_calibration and calibration_file == get_tag(CALIBRATION_WORKSPACE_TAG, workspace)) or\
@@ -355,7 +355,7 @@ def run_added_loader(loader, file_information, is_transmission, period, parent_a
     :param is_transmission: if  the set is a transmission
     :param period: the selected period
     :param parent_alg: a handle to the parent algorithm
-    @return: workspaces and monitors
+    :return: workspaces and monitors
     """
     def extract_histogram_data(load_alg, num_periods, selected_period):
         ws_collection = []
diff --git a/scripts/SANS/sans/algorithm_detail/merge_reductions.py b/scripts/SANS/sans/algorithm_detail/merge_reductions.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b550c2a3424b543d4b898caa28525f62c6f798d
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/merge_reductions.py
@@ -0,0 +1,173 @@
+""" Merges two reduction types to single reduction"""
+
+from __future__ import (absolute_import, division, print_function)
+from abc import (ABCMeta, abstractmethod)
+from six import with_metaclass
+from sans.common.general_functions import create_child_algorithm
+from sans.common.enums import (SANSInstrument, DataType, FitModeForMerge)
+from sans.algorithm_detail.bundles import MergeBundle
+
+
+class Merger(with_metaclass(ABCMeta, object)):
+    """ Merger interface"""
+
+    @abstractmethod
+    def merge(self, reduction_mode_vs_output_bundles, parent_alg=None):
+        pass
+
+
+class ISIS1DMerger(Merger):
+    """
+    Class which handles ISIS-style merges.
+    """
+    def __init__(self):
+        super(ISIS1DMerger, self).__init__()
+
+    def merge(self, reduction_mode_vs_output_bundles, parent_alg=None):
+        """
+        Merges two partial reductions to obtain a merged reduction.
+
+        :param reduction_mode_vs_output_bundles: a ReductionMode vs OutputBundle map
+        :param parent_alg: a handle to the parent algorithm.
+        :return: a MergeBundle which contains the merged workspace.
+        """
+        # Get the primary and secondary detectors for stitching. This is normally LAB and HAB, but in other scenarios
+        # there might be completely different detectors. This approach allows future adjustments to the stitching
+        # configuration. The data from the secondary detector will be stitched to the data from the primary detector.
+
+        primary_detector, secondary_detector = get_detectors_for_merge(reduction_mode_vs_output_bundles)
+        sample_count_primary, sample_norm_primary, sample_count_secondary, sample_norm_secondary = \
+            get_partial_workspaces(primary_detector, secondary_detector, reduction_mode_vs_output_bundles, is_sample)
+
+        # Get the relevant can workspaces from the reduction settings.
+        can_count_primary, can_norm_primary, can_count_secondary, can_norm_secondary = \
+            get_partial_workspaces(primary_detector, secondary_detector, reduction_mode_vs_output_bundles, is_can)
+
+        # Get fit parameters
+        shift_factor, scale_factor, fit_mode = get_shift_and_scale_parameter(reduction_mode_vs_output_bundles)
+        fit_mode_as_string = FitModeForMerge.to_string(fit_mode)
+
+        # We need to convert NoFit to None.
+        if fit_mode_as_string == "NoFit":
+            fit_mode_as_string = "None"
+
+        # Run the SANSStitch algorithm
+        stitch_name = "SANSStitch"
+        stitch_options = {"HABCountsSample": sample_count_secondary,
+                          "HABNormSample": sample_norm_secondary,
+                          "LABCountsSample": sample_count_primary,
+                          "LABNormSample": sample_norm_primary,
+                          "ProcessCan": False,
+                          "Mode": fit_mode_as_string,
+                          "ScaleFactor": scale_factor,
+                          "ShiftFactor": shift_factor,
+                          "OutputWorkspace": "dummy"}
+
+        if can_count_primary is not None and can_norm_primary is not None \
+                and can_count_secondary is not None and can_norm_secondary is not None:
+            stitch_options_can = {"HABCountsCan": can_count_secondary,
+                                  "HABNormCan": can_norm_secondary,
+                                  "LABCountsCan": can_count_primary,
+                                  "LABNormCan": can_norm_primary,
+                                  "ProcessCan": True}
+            stitch_options.update(stitch_options_can)
+
+        stitch_alg = create_child_algorithm(parent_alg, stitch_name, **stitch_options)
+        stitch_alg.execute()
+
+        # Get the fit values
+        shift_from_alg = stitch_alg.getProperty("OutShiftFactor").value
+        scale_from_alg = stitch_alg.getProperty("OutScaleFactor").value
+        merged_workspace = stitch_alg.getProperty("OutputWorkspace").value
+
+        # Return a merge bundle with the merged workspace and the fitted scale and shift factor (they are good
+        # diagnostic tools which are desired by the instrument scientists).
+        return MergeBundle(merged_workspace=merged_workspace, shift=shift_from_alg, scale=scale_from_alg)
+
+
+class NullMerger(Merger):
+    def __init__(self):
+        super(NullMerger, self).__init__()
+
+    def merge(self, reduction_mode_vs_output_bundles, parent_alg=None):
+        pass
+
+
+class MergeFactory(object):
+    def __init__(self):
+        super(MergeFactory, self).__init__()
+
+    @staticmethod
+    def create_merger(state):
+        # The selection depends on the facility/instrument
+        data_info = state.data
+        instrument = data_info.instrument
+
+        if instrument is SANSInstrument.LARMOR or instrument is SANSInstrument.LOQ or \
+           instrument is SANSInstrument.SANS2D:
+            merger = ISIS1DMerger()
+        else:
+            raise RuntimeError("MergeFactory: The merging for your selection has not been implemented yet.")
+        return merger
+
+
+def get_detectors_for_merge(output_bundles):
+    """
+    Extracts the merge strategy from the output bundles. This is the name of the primary and the secondary detector.
+
+    The merge strategy will let us know which two detectors are to be merged. This abstraction might be useful in the
+    future if we are dealing with more than two detector banks.
+    :param output_bundles: a ReductionMap vs OutputBundle map
+    :return: the primary detector and the secondary detector.
+    """
+    reduction_settings_collection = next(iter(list(output_bundles.values())))
+    state = reduction_settings_collection[0].state
+    reduction_info = state.reduction
+    return reduction_info.get_merge_strategy()
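+
+# Illustrative sketch (hypothetical structure): the reduction_mode_vs_output_bundles map used above associates
+# each detector's reduction mode with the partial output bundles produced for it, e.g.
+#
+#   {ISISReductionMode.LAB: [lab_sample_bundle, lab_can_bundle],
+#    ISISReductionMode.HAB: [hab_sample_bundle, hab_can_bundle]}
+#
+# get_detectors_for_merge only inspects the state of the first bundle, and the merge strategy it returns is
+# typically (ISISReductionMode.LAB, ISISReductionMode.HAB).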
+
+
+def get_partial_workspaces(primary_detector, secondary_detector, reduction_mode_vs_output_bundles, is_data_type):
+    """
+    Get the partial workspaces for the primary and secondary detectors.
+
+    :param primary_detector: the primary detector (now normally ISISReductionMode.LAB)
+    :param secondary_detector: the secondary detector (now normally ISISReductionMode.HAB)
+    :param reduction_mode_vs_output_bundles: a ReductionMode vs OutputBundles map
+    :param is_data_type: the data type, i.e. if can or sample
+    :return: the primary count workspace, the primary normalization workspace, the secondary count workspace and the
+             secondary normalization workspace.
+    """
+    # Get primary reduction information for specified data type, i.e. sample or can
+    primary = reduction_mode_vs_output_bundles[primary_detector]
+    primary_for_data_type = next((setting for setting in primary if is_data_type(setting)), None)
+    primary_count = primary_for_data_type.output_workspace_count
+    primary_norm = primary_for_data_type.output_workspace_norm
+
+    # Get secondary reduction information for specified data type, i.e. sample or can
+    secondary = reduction_mode_vs_output_bundles[secondary_detector]
+    secondary_for_data_type = next((setting for setting in secondary if is_data_type(setting)), None)
+    secondary_count = secondary_for_data_type.output_workspace_count
+    secondary_norm = secondary_for_data_type.output_workspace_norm
+    return primary_count, primary_norm, secondary_count, secondary_norm
+
+
+def get_shift_and_scale_parameter(reduction_mode_vs_output_bundles):
+    """
+    Gets the shift and scale parameters from a set of OutputBundles
+
+    :param reduction_mode_vs_output_bundles: a ReductionMode vs OutputBundle map
+    :return: the shift, scale and fit mode.
+    """
+    reduction_settings_collection = next(iter(list(reduction_mode_vs_output_bundles.values())))
+    state = reduction_settings_collection[0].state
+    reduction_info = state.reduction
+    return reduction_info.merge_shift, reduction_info.merge_scale, reduction_info.merge_fit_mode
+
+
+def is_sample(x):
+    return x.data_type is DataType.Sample
+
+
+def is_can(x):
+    return x.data_type is DataType.Can
diff --git a/scripts/SANS/sans/algorithm_detail/q_resolution_calculator.py b/scripts/SANS/sans/algorithm_detail/q_resolution_calculator.py
index 744826d82922a32cb313283fc1c893a67afc3fff..4e5460342fbdf0246a30d021a5761f5aeac82050 100644
--- a/scripts/SANS/sans/algorithm_detail/q_resolution_calculator.py
+++ b/scripts/SANS/sans/algorithm_detail/q_resolution_calculator.py
@@ -13,8 +13,8 @@ from sans.common.general_functions import create_unmanaged_algorithm
 def load_sigma_moderator_workspace(file_name):
     """
     Gets the sigma moderator workspace.
-    @param file_name: the file name of the sigma moderator
-    @returns the sigma moderator workspace
+    :param file_name: the file name of the sigma moderator
+    :returns the sigma moderator workspace
     """
     load_name = "LoadRKH"
     load_option = {"Filename": file_name,
@@ -37,8 +37,8 @@ def get_aperture_diameters(convert_to_q):
     Gets the aperture diameters for the sample and the source
     If all fields are specified for a rectangular aperture then this is used, else a circular aperture is
     used.
-    @param convert_to_q: a SANSStateConvertToQ object.
-    @return: aperture diameter for the source, aperture diameter for the sample
+    :param convert_to_q: a SANSStateConvertToQ object.
+    :return: aperture diameter for the source, aperture diameter for the sample
     """
     def set_up_diameter(height, width):
         """
@@ -64,9 +64,9 @@ def get_aperture_diameters(convert_to_q):
 def create_q_resolution_workspace(convert_to_q, data_workspace):
     """
     Provides a q resolution workspace
-    @param convert_to_q: a SANSStateConvertToQ object.
-    @param data_workspace: the workspace which is to be reduced.
-    @return: a q resolution workspace
+    :param convert_to_q: a SANSStateConvertToQ object.
+    :param data_workspace: the workspace which is to be reduced.
+    :return: a q resolution workspace
     """
     # Load the sigma moderator
     file_name = convert_to_q.moderator_file
@@ -115,9 +115,9 @@ class QResolutionCalculator(with_metaclass(ABCMeta, object)):
     def get_q_resolution_workspace(self, convert_to_q_info, data_workspace):
         """
         Calculates the q resolution workspace which is required for the Q1D algorithm
-        @param convert_to_q_info: a SANSStateConvertToQ object
-        @param data_workspace: the workspace which is being reduced.
-        @return: a q resolution workspace or None
+        :param convert_to_q_info: a SANSStateConvertToQ object
+        :param data_workspace: the workspace which is being reduced.
+        :return: a q resolution workspace or None
         """
         pass
 
diff --git a/scripts/SANS/sans/algorithm_detail/single_execution.py b/scripts/SANS/sans/algorithm_detail/single_execution.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ec5f48ac09a16659dc11b20743ac4902aa982cc
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/single_execution.py
@@ -0,0 +1,256 @@
+from __future__ import (absolute_import, division, print_function)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import (create_child_algorithm,
+                                           write_hash_into_reduced_can_workspace,
+                                           get_reduced_can_workspace_from_ads)
+from sans.common.enums import (ISISReductionMode, DetectorType, DataType, OutputParts)
+from sans.algorithm_detail.strip_end_nans_and_infs import strip_end_nans
+from sans.algorithm_detail.merge_reductions import (MergeFactory, is_sample, is_can)
+from sans.algorithm_detail.bundles import (OutputBundle, OutputPartsBundle)
+
+
+def run_core_reduction(reduction_alg, reduction_setting_bundle):
+    """
+    This function runs a core reduction. This is essentially half a reduction (either sample or can).
+
+    :param reduction_alg: a handle to the reduction algorithm.
+    :param reduction_setting_bundle: a ReductionSettingBundle tuple
+    :return: an OutputBundle and an OutputPartsBundle
+    """
+
+    # Get component to reduce
+    component = get_component_to_reduce(reduction_setting_bundle)
+    # Set the properties on the reduction algorithms
+    serialized_state = reduction_setting_bundle.state.property_manager
+    reduction_alg.setProperty("SANSState", serialized_state)
+    reduction_alg.setProperty("Component", component)
+    reduction_alg.setProperty("ScatterWorkspace", reduction_setting_bundle.scatter_workspace)
+    reduction_alg.setProperty("ScatterMonitorWorkspace", reduction_setting_bundle.scatter_monitor_workspace)
+    reduction_alg.setProperty("DataType", DataType.to_string(reduction_setting_bundle.data_type))
+
+    if reduction_setting_bundle.transmission_workspace is not None:
+        reduction_alg.setProperty("TransmissionWorkspace", reduction_setting_bundle.transmission_workspace)
+
+    if reduction_setting_bundle.direct_workspace is not None:
+        reduction_alg.setProperty("DirectWorkspace", reduction_setting_bundle.direct_workspace)
+
+    reduction_alg.setProperty("OutputWorkspace", EMPTY_NAME)
+    reduction_alg.setProperty("SumOfCounts", EMPTY_NAME)
+    reduction_alg.setProperty("SumOfNormFactors", EMPTY_NAME)
+
+    # Run the reduction core
+    reduction_alg.execute()
+
+    # Get the results
+    output_workspace = reduction_alg.getProperty("OutputWorkspace").value
+    output_workspace_count = reduction_alg.getProperty("SumOfCounts").value
+    output_workspace_norm = reduction_alg.getProperty("SumOfNormFactors").value
+
+    # Pull the result out of the workspace
+    output_bundle = OutputBundle(state=reduction_setting_bundle.state,
+                                 data_type=reduction_setting_bundle.data_type,
+                                 reduction_mode=reduction_setting_bundle.reduction_mode,
+                                 output_workspace=output_workspace)
+
+    output_parts_bundle = OutputPartsBundle(state=reduction_setting_bundle.state,
+                                            data_type=reduction_setting_bundle.data_type,
+                                            reduction_mode=reduction_setting_bundle.reduction_mode,
+                                            output_workspace_count=output_workspace_count,
+                                            output_workspace_norm=output_workspace_norm)
+    return output_bundle, output_parts_bundle
+
+
+def get_final_output_workspaces(output_bundles, parent_alg):
+    """
+    This function provides the final steps for the data reduction.
+
+    The final steps are:
+    1. Can Subtraction (if required)
+    2. Data clean up (if required)
+    :param output_bundles: a set of OutputBundles
+    :param parent_alg: a handle to the parent algorithm.
+    :return: a map of ReductionMode vs final output workspaces.
+    """
+
+    reduction_mode_vs_output_bundles = get_reduction_mode_vs_output_bundles(output_bundles)
+
+    # For each reduction mode, we need to perform a can subtraction (and potential cleaning of the workspace)
+    final_output_workspaces = {}
+    for reduction_mode, bundles_for_mode in reduction_mode_vs_output_bundles.items():
+        # Find the sample and the can in the data collection
+        output_sample_workspace = next((output_bundle.output_workspace for output_bundle in bundles_for_mode
+                                        if is_sample(output_bundle)), None)
+        output_can_workspace = next((output_bundle.output_workspace for output_bundle in bundles_for_mode
+                                     if is_can(output_bundle)), None)
+        # Perform the can subtraction
+        if output_can_workspace is not None:
+            final_output_workspace = perform_can_subtraction(output_sample_workspace, output_can_workspace, parent_alg)
+        else:
+            final_output_workspace = output_sample_workspace
+
+        # Tidy up the workspace by removing start/end-NANs and start/end-INFs
+        final_output_workspace = strip_end_nans(final_output_workspace, parent_alg)
+        final_output_workspaces.update({reduction_mode: final_output_workspace})
+
+    # Finally add sample log information
+    # TODO: Add log information
+
+    return final_output_workspaces
+
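+# For illustration only: for a reduction of both banks the returned map would look roughly like
+#   {ISISReductionMode.LAB: lab_workspace, ISISReductionMode.HAB: hab_workspace}
+# where each value is the (optionally can-subtracted and trimmed) final output workspace.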
+
+def perform_can_subtraction(sample, can, parent_alg):
+    """
+    Subtracts the can from the sample workspace.
+
+    We need to manually take care of the q resolution issue here.
+    :param sample: the sample workspace
+    :param can: the can workspace.
+    :param parent_alg: a handle to the parent algorithm.
+    :return: the subtracted workspace.
+    """
+    subtraction_name = "Minus"
+    subtraction_options = {"LHSWorkspace": sample,
+                           "RHSWorkspace": can,
+                           "OutputWorkspace": EMPTY_NAME}
+    subtraction_alg = create_child_algorithm(parent_alg, subtraction_name, **subtraction_options)
+    subtraction_alg.execute()
+    output_workspace = subtraction_alg.getProperty("OutputWorkspace").value
+
+    # If the workspace is 1D and contains Q resolution (i.e. DX values), then we need to make sure that the
+    # resulting output workspace contains the correct values
+    correct_q_resolution_for_can(sample, can, output_workspace)
+
+    return output_workspace
+
+
+def correct_q_resolution_for_can(sample_workspace, can_workspace, subtracted_workspace):
+    """
+    Sets the correct Q resolution on a can-subtracted workspace.
+
+    We need to transfer the Q resolution from the original workspaces to the subtracted
+    workspace. Richard wants us to ignore potential DX values for the CAN workspace (they
+    would be very small anyway). The Q resolution functionality currently only exists
+    for 1D, i.e. when only one spectrum is present.
+    """
+    _ = can_workspace  # noqa
+    if sample_workspace.getNumberHistograms() == 1 and sample_workspace.hasDx(0):
+        subtracted_workspace.setDx(0, sample_workspace.dataDx(0))
+
+
+def get_merge_bundle_for_merge_request(output_bundles, parent_alg):
+    """
+    Create a merge bundle for the reduction outputs and perform stitching if required
+    :param output_bundles: a list of output_bundles
+    :param parent_alg: a handle to the parent algorithm
+    """
+    # Order the reductions. This leaves us with a dict mapping from the reduction type (i.e. HAB, LAB) to
+    # a list of reduction settings which contain the information for sample and can.
+    reduction_mode_vs_output_bundles = get_reduction_mode_vs_output_bundles(output_bundles)
+
+    # Get the underlying state from one of the elements
+    state = output_bundles[0].state
+
+    merge_factory = MergeFactory()
+    merger = merge_factory.create_merger(state)
+
+    # Run the merger and return the merged output workspace
+    return merger.merge(reduction_mode_vs_output_bundles, parent_alg)
+
+
+def get_reduction_mode_vs_output_bundles(output_bundles):
+    """
+    Groups the reduction information by the reduction mode, e.g. all information regarding HAB is collated, similarly
+    for LAB.
+    """
+    outputs = {}
+    # Pair up the different reduction modes
+    for output_bundle in output_bundles:
+        key = output_bundle.reduction_mode
+        if key in outputs:
+            outputs[key].append(output_bundle)
+        else:
+            outputs.update({key: [output_bundle]})
+    return outputs
+
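+# For illustration only: given sample and can bundles for both banks, the grouping above yields
+#   {ISISReductionMode.LAB: [lab_sample_bundle, lab_can_bundle],
+#    ISISReductionMode.HAB: [hab_sample_bundle, hab_can_bundle]}
+# (the bundle names here are purely illustrative).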
+
+def get_component_to_reduce(reduction_setting_bundle):
+    """
+    Gets the component to reduce as a string. Currently we encode this as LAB or HAB.
+
+    :param reduction_setting_bundle: a ReductionSettingBundle tuple.
+    :return: the reduction mode as a string.
+    """
+    # Get the reduction mode
+    reduction_mode = reduction_setting_bundle.reduction_mode
+
+    if reduction_mode is ISISReductionMode.LAB:
+        reduction_mode_setting = DetectorType.to_string(DetectorType.LAB)
+    elif reduction_mode is ISISReductionMode.HAB:
+        reduction_mode_setting = DetectorType.to_string(DetectorType.HAB)
+    else:
+        raise RuntimeError("SingleExecution: An unknown reduction mode was selected: {}. "
+                           "Currently only HAB and LAB are supported.".format(reduction_mode))
+    return reduction_mode_setting
+
+
+def run_optimized_for_can(reduction_alg, reduction_setting_bundle):
+    """
+    Checks if the can reduction for this state already exists in the ADS; if so it is reused,
+    otherwise the can is reduced and added to the ADS.
+
+    @param reduction_alg: a handle to the SANSReductionCore algorithm
+    @param reduction_setting_bundle: a ReductionSettingBundle tuple.
+    @return: a reduced workspace, a partial output workspace for the counts, a partial workspace for the normalization.
+    """
+    state = reduction_setting_bundle.state
+    output_parts = reduction_setting_bundle.output_parts
+    reduction_mode = reduction_setting_bundle.reduction_mode
+    data_type = reduction_setting_bundle.data_type
+    reduced_can_workspace, reduced_can_workspace_count, reduced_can_workspace_norm = \
+        get_reduced_can_workspace_from_ads(state, output_parts, reduction_mode)
+    # Set the results on the output bundle
+    output_bundle = OutputBundle(state=state, data_type=data_type, reduction_mode=reduction_mode,
+                                 output_workspace=reduced_can_workspace)
+    output_parts_bundle = OutputPartsBundle(state=state, data_type=data_type, reduction_mode=reduction_mode,
+                                            output_workspace_count=reduced_can_workspace_count,
+                                            output_workspace_norm=reduced_can_workspace_norm)
+    # The logic table for the recalculation of the partial outputs is:
+    # | output_parts | reduced_can_workspace_count is None |  reduced_can_workspace_norm is None | Recalculate |
+    # ----------------------------------------------------------------------------------------------------------
+    # |  False       |        True                         |           True                      |    False    |
+    # |  False       |        True                         |           False                     |    False    |
+    # |  False       |        False                        |           True                      |    False    |
+    # |  False       |        False                        |           False                     |    False    |
+    # |  True        |        True                         |           True                      |    False    |
+    # |  True        |        True                         |           False                     |    True     |
+    # |  True        |        False                        |           True                      |    True     |
+    # |  True        |        False                        |           False                     |    False    |
+
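+    # In other words: a recalculation is only required if the partial outputs were requested and exactly
+    # one of the two partial workspaces is missing (an exclusive-or of the two None checks below).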
+    is_invalid_partial_workspaces = ((output_parts_bundle.output_workspace_count is None and
+                                     output_parts_bundle.output_workspace_norm is not None) or
+                                     (output_parts_bundle.output_workspace_count is not None and
+                                     output_parts_bundle.output_workspace_norm is None))
+    partial_output_require_reload = output_parts and is_invalid_partial_workspaces
+
+    if output_bundle.output_workspace is None or partial_output_require_reload:
+        output_bundle, output_parts_bundle = run_core_reduction(reduction_alg, reduction_setting_bundle)
+
+        # Now we need to tag the workspaces and add them to the ADS
+        if output_bundle.output_workspace is not None:
+            write_hash_into_reduced_can_workspace(state=output_bundle.state,
+                                                  workspace=output_bundle.output_workspace,
+                                                  partial_type=None,
+                                                  reduction_mode=reduction_mode)
+
+        if (output_parts_bundle.output_workspace_count is not None and
+           output_parts_bundle.output_workspace_norm is not None):
+            write_hash_into_reduced_can_workspace(state=output_parts_bundle.state,
+                                                  workspace=output_parts_bundle.output_workspace_count,
+                                                  partial_type=OutputParts.Count,
+                                                  reduction_mode=reduction_mode)
+
+            write_hash_into_reduced_can_workspace(state=output_parts_bundle.state,
+                                                  workspace=output_parts_bundle.output_workspace_norm,
+                                                  partial_type=OutputParts.Norm,
+                                                  reduction_mode=reduction_mode)
+
+    return output_bundle, output_parts_bundle
diff --git a/scripts/SANS/sans/algorithm_detail/strip_end_nans_and_infs.py b/scripts/SANS/sans/algorithm_detail/strip_end_nans_and_infs.py
new file mode 100644
index 0000000000000000000000000000000000000000..08ffc8bf566c8392ad382f49869f86045556d492
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/strip_end_nans_and_infs.py
@@ -0,0 +1,57 @@
+from __future__ import (absolute_import, division, print_function)
+from math import (isinf, isnan)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_child_algorithm
+
+
+def strip_end_nans(workspace, parent_alg=None):
+    """
+    This function removes the INFs and NANs from the start and end of a 1D workspace.
+
+    :param workspace: the workspace which is about to be trimmed
+    :param parent_alg: a handle to the parent algorithm
+    :return: a workspace with the leading and trailing NANs and INFs removed
+    """
+    # If the workspace is larger than 1D, then there is nothing we can do
+    if workspace.getNumberHistograms() > 1:
+        return workspace
+    data = workspace.readY(0)
+    # Find the indices at which the first and the last legal (finite) values appear
+
+    start_index = next((index for index in range(len(data)) if is_valid_data(data[index])), None)
+    end_index = next((index for index in range(len(data)-1, -1, -1) if is_valid_data(data[index])), None)
+
+    # If an index was not found then we return the current workspace. This means that all entries are either INFs
+    # or NANs.
+
+    if start_index is None or end_index is None:
+        return workspace
+
+    # Get the corresponding Q values
+    q_values = workspace.readX(0)
+
+    start_q = q_values[start_index]
+
+    # Make sure we're inside the bin that we want to crop. This is part of the old framework. It looks like a bug fix,
+    # hence we leave it in here for now. In general this is risky, and it should be a fraction of a bin width by which
+    # we increase the end value
+    is_point_data = len(workspace.dataX(0)) == len(workspace.dataY(0))
+    if is_point_data:
+        end_q = 1.001 * q_values[end_index]
+    else:
+        end_q = 1.001 * q_values[end_index + 1]
+
+    # Crop the workspace in place
+    crop_name = "CropWorkspace"
+    crop_options = {"InputWorkspace": workspace,
+                    "XMin": start_q,
+                    "XMax": end_q}
+    crop_alg = create_child_algorithm(parent_alg, crop_name, **crop_options)
+    crop_alg.setProperty("OutputWorkspace", EMPTY_NAME)
+    crop_alg.execute()
+    ws = crop_alg.getProperty("OutputWorkspace").value
+    return ws
+
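+# For illustration only: for y-values like [nan, inf, 0.1, 0.2, nan] the generator expressions above
+# yield start_index = 2 and end_index = 3, so the workspace is cropped to the Q range spanned by the
+# corresponding bins.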
+
+def is_valid_data(value):
+    return not isinf(value) and not isnan(value)
diff --git a/scripts/SANS/sans/algorithm_detail/xml_shapes.py b/scripts/SANS/sans/algorithm_detail/xml_shapes.py
index a40940d77ed909445e033762345c6cc3bb610f74..d2e658aae28e328b0f74a699a63f2be9be0aee4f 100644
--- a/scripts/SANS/sans/algorithm_detail/xml_shapes.py
+++ b/scripts/SANS/sans/algorithm_detail/xml_shapes.py
@@ -5,8 +5,8 @@ from math import (pi, cos, sin)
 def add_xml_shape(xml, complete_xml_element):
     """
         Add an arbitrary shape to region to be masked
-        @param xml: a list of shapes to which we append here
-        @param complete_xml_element: description of the shape to add
+        :param xml: a list of shapes to which we append here
+        :param complete_xml_element: description of the shape to add
     """
     if not complete_xml_element.startswith('<'):
         raise ValueError('Excepted xml string but found: ' + str(complete_xml_element))
@@ -16,10 +16,10 @@ def add_xml_shape(xml, complete_xml_element):
 def infinite_plane(shape_id, plane_pt, normal_pt):
     """
         Generates xml code for an infinite plane
-        @param shape_id: a string to refer to the shape by
-        @param plane_pt: a point in the plane
-        @param normal_pt: the direction of a normal to the plane
-        @return the xml string
+        :param shape_id: a string to refer to the shape by
+        :param plane_pt: a point in the plane
+        :param normal_pt: the direction of a normal to the plane
+        :return the xml string
     """
     return '<infinite-plane id="' + str(shape_id) + '">' + \
            '<point-in-plane x="' + str(plane_pt[0]) + '" y="' + str(plane_pt[1]) + '" z="' + \
@@ -32,11 +32,11 @@ def infinite_plane(shape_id, plane_pt, normal_pt):
 def infinite_cylinder(centre, radius, axis, shape_id='shape'):
     """
         Generates xml code for an infintely long cylinder
-        @param centre: a tupple for a point on the axis
-        @param radius: cylinder radius
-        @param axis: cylinder orientation
-        @param shape_id: a string to refer to the shape by
-        @return the xml string
+        :param centre: a tuple for a point on the axis
+        :param radius: cylinder radius
+        :param axis: cylinder orientation
+        :param shape_id: a string to refer to the shape by
+        :return the xml string
     """
     return '<infinite-cylinder id="' + str(shape_id) + '">' + \
            '<centre x="' + str(centre[0]) + '" y="' + str(centre[1]) + '" z="' + str(centre[2]) + '" />' + \
@@ -47,12 +47,12 @@ def infinite_cylinder(centre, radius, axis, shape_id='shape'):
 def finite_cylinder(centre, radius, height, axis, shape_id='shape'):
     """
         Generates xml code for an infintely long cylinder
-        @param centre: a tuple for a point on the axis
-        @param radius: cylinder radius
-        @param height: cylinder height
-        @param axis: cylinder orientation
-        @param shape_id: a string to refer to the shape by
-        @return the xml string
+        :param centre: a tuple for a point on the axis
+        :param radius: cylinder radius
+        :param height: cylinder height
+        :param axis: cylinder orientation
+        :param shape_id: a string to refer to the shape by
+        :return the xml string
     """
     return '<cylinder id="' + str(shape_id) + '">' + \
            '<centre-of-bottom-base x="' + str(centre[0]) + '" y="' + str(centre[1]) + '" z="' + str(centre[2]) + \
diff --git a/scripts/SANS/sans/command_interface/ISISCommandInterface.py b/scripts/SANS/sans/command_interface/ISISCommandInterface.py
new file mode 100644
index 0000000000000000000000000000000000000000..7487245ad7f125b953c827212a496199a6e67cae
--- /dev/null
+++ b/scripts/SANS/sans/command_interface/ISISCommandInterface.py
@@ -0,0 +1,1070 @@
+from __future__ import (absolute_import, division, print_function)
+import re
+import inspect
+import types
+from mantid.kernel import config
+from mantid.api import (AnalysisDataService, WorkspaceGroup)
+from SANSadd2 import add_runs
+from sans.sans_batch import SANSBatchReduction
+from sans.command_interface.command_interface_functions import (print_message, warning_message)
+from sans.command_interface.command_interface_state_director import (CommandInterfaceStateDirector, DataCommand,
+                                                                     DataCommandId, NParameterCommand, NParameterCommandId,
+                                                                     FitData)
+from sans.command_interface.batch_csv_file_parser import BatchCsvParser
+from sans.common.constants import ALL_PERIODS
+from sans.common.file_information import (find_sans_file, find_full_file_path)
+from sans.common.enums import (DetectorType, FitType, RangeStepType, ReductionDimensionality,
+                               ISISReductionMode, SANSFacility, SaveType, BatchReductionEntry, OutputMode)
+from sans.common.general_functions import (convert_bank_name_to_detector_type_isis, get_output_name,
+                                           is_part_of_reduced_output_workspace_group)
+
+# Disable plotting if running outside Mantidplot
+try:
+    import mantidplot
+except (Exception, Warning):
+    mantidplot = None
+    # this should only happen when this is called from outside MantidPlot; in that case
+    # attempting to plot will raise an exception
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Globals
+# ----------------------------------------------------------------------------------------------------------------------
+DefaultTrans = True
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# CommandInterfaceStateDirector global instance
+# ----------------------------------------------------------------------------------------------------------------------
+director = CommandInterfaceStateDirector(SANSFacility.ISIS)
+
+
+def deprecated(obj):
+    """
+    Decorator to apply to functions or classes that we think are not being (or
+    should not be) used anymore.  Prints a warning to the log.
+    """
+    if inspect.isfunction(obj) or inspect.ismethod(obj):
+        if inspect.isfunction(obj):
+            obj_desc = "\"%s\" function" % obj.__name__
+        else:
+            obj_desc = "\"%s\" class" % obj.__self__.__class__.__name__
+
+        def print_warning_wrapper(*args, **kwargs):
+            warning_message("The {0} has been marked as deprecated and may be "
+                            "removed in a future version of Mantid. If you "
+                            "believe this to have been marked in error, please "
+                            "contact the member of the Mantid team responsible "
+                            "for ISIS SANS.".format(obj_desc))
+            return obj(*args, **kwargs)
+        return print_warning_wrapper
+
+    # Add a @deprecated decorator to each of the member functions in the class
+    # (by recursion).
+    if inspect.isclass(obj):
+        for name, fn in inspect.getmembers(obj):
+            if isinstance(fn, types.MethodType):
+                setattr(obj, name, deprecated(fn))
+        return obj
+
+    assert False, "Programming error.  You have incorrectly applied the "\
+                  "@deprecated decorator.  This is only for use with functions "\
+                  "or classes."
+
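+# Minimal usage sketch of the decorator above (hypothetical command, for illustration only):
+#
+#   @deprecated
+#   def OldCommand():
+#       pass
+#
+# Calling OldCommand() then logs a deprecation warning before the wrapped function is executed.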
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Unnecessary commands
+# ----------------------------------------------------------------------------------------------------------------------
+def SetVerboseMode(state):
+    pass
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Setting instruments
+# ----------------------------------------------------------------------------------------------------------------------
+def SANS2D(idf_path=None):
+    config['default.instrument'] = 'SANS2D'
+
+
+def SANS2DTUBES():
+    config['default.instrument'] = 'SANS2D'
+
+
+def LOQ(idf_path='LOQ_Definition_20020226-.xml'):
+    config['default.instrument'] = 'LOQ'
+
+
+def LARMOR(idf_path=None):
+    config['default.instrument'] = 'LARMOR'
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Unused commands
+# ----------------------------------------------------------------------------------------------------------------------
+@deprecated
+def _SetWavelengthRange(start, end):
+    _ = start  # noqa
+    _ = end  # noqa
+    pass
+
+
+@deprecated
+def Reduce():
+    pass
+
+
+@deprecated
+def GetMismatchedDetList():
+    pass
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Currently not implemented commands
+# ----------------------------------------------------------------------------------------------------------------------
+def TransWorkspace(sample, can=None):
+    """
+        Use a given workspace that contains pre-calculated transmissions
+        @param sample: the workspace to use for the sample
+        @param can: calculated transmission for the can
+    """
+    _, _ = sample, can  # noqa
+    raise NotImplementedError("The TransWorkspace command is not implemented in SANS v2.")
+
+
+def createColetteScript(inputdata, format, reduced, centreit, plotresults, csvfile='', savepath=''):
+    _, _, _, _, _, _, _ = inputdata, format, reduced, centreit, plotresults, csvfile, savepath  # noqa
+    raise NotImplementedError("The creatColleteScript command is not implemented in SANS v2.")
+
+
+def FindBeamCentre(rlow, rupp, MaxIter=10, xstart=None, ystart=None, tolerance=1.251e-4,  find_direction=None):
+    _, _, _, _, _, _, _ = rlow, rupp, MaxIter, xstart, ystart, tolerance, find_direction  # noqa
+    raise NotImplementedError("The FindBeamCentre command is not implemented in SANS v2.")
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Data related commands
+# ----------------------------------------------------------------------------------------------------------------------
+def AssignSample(sample_run, reload=True, period=ALL_PERIODS):
+    """
+    Sets the sample scatter data.
+
+    @param sample_run: the run number to analyse, e.g. SANS2D7777.nxs
+    @param reload: must be set to True
+    @param period: the period (entry) number to load, default is the first period
+    """
+    _ = reload  # noqa
+    # First of all the default for all periods used to be -1. If we encounter this then set periods to ALL_PERIODS
+    period = int(period)
+    period = ALL_PERIODS if period == -1 else period
+
+    # Print the output
+    message = 'AssignSample("' + str(sample_run) + '"'
+    if period != ALL_PERIODS:
+        message += ', ' + str(period)
+    message += ')'
+    print_message(message)
+
+    # Get the full file name of the run
+    file_name = find_sans_file(sample_run)
+
+    # Set the command
+    data_command = DataCommand(command_id=DataCommandId.sample_scatter, file_name=file_name, period=period)
+    director.add_command(data_command)
+
+
+def AssignCan(can_run, reload=True, period=ALL_PERIODS):
+    """
+    Sets the can scatter data.
+
+    @param can_run: the run number to analyse, e.g. SANS2D7777.nxs
+    @param reload: must be set to True
+    @param period: the period (entry) number to load, default is the first period
+    """
+    _ = reload  # noqa
+    # First of all the default for all periods used to be -1. If we encounter this then set periods to ALL_PERIODS
+    period = int(period)
+    period = ALL_PERIODS if period == -1 else period
+
+    # Print the output
+    message = 'AssignCan("' + str(can_run) + '"'
+    if period != ALL_PERIODS:
+        message += ', ' + str(period)
+    message += ')'
+    print_message(message)
+
+    # Get the full file name of the run
+    file_name = find_sans_file(can_run)
+
+    # Set the command
+    data_command = DataCommand(command_id=DataCommandId.can_scatter, file_name=file_name, period=period)
+    director.add_command(data_command)
+
+
+def TransmissionSample(sample, direct, reload=True,
+                       period_t=ALL_PERIODS, period_d=ALL_PERIODS):
+    """
+    Specify the transmission and direct runs for the sample.
+
+    @param sample: the transmission run
+    @param direct: direct run
+    @param reload: whether to replace the workspace if it is already there
+    @param period_t: the entry number of the transmission run (default single entry file)
+    @param period_d: the entry number of the direct run (default single entry file)
+    """
+    _ = reload  # noqa
+    # First of all the default for all periods used to be -1. If we encounter this then set periods to ALL_PERIODS
+    period_t = int(period_t)
+    period_d = int(period_d)
+    period_t = ALL_PERIODS if period_t == -1 else period_t
+    period_d = ALL_PERIODS if period_d == -1 else period_d
+
+    print_message('TransmissionSample("' + str(sample) + '","' + str(direct) + '")')
+
+    # Get the full file name of the run
+    trans_file_name = find_sans_file(sample)
+    direct_file_name = find_sans_file(direct)
+
+    # Set the command
+    trans_command = DataCommand(command_id=DataCommandId.sample_transmission, file_name=trans_file_name,
+                                period=period_t)
+    direct_command = DataCommand(command_id=DataCommandId.sample_direct, file_name=direct_file_name, period=period_d)
+    director.add_command(trans_command)
+    director.add_command(direct_command)
+
+
+def TransmissionCan(can, direct, reload=True, period_t=-1, period_d=-1):
+    """
+    Specify the transmission and direct runs for the can
+    @param can: the transmission run
+    @param direct: direct run
+    @param reload: whether to replace the workspace if it is already there
+    @param period_t: the entry number of the transmission run (default single entry file)
+    @param period_d: the entry number of the direct run (default single entry file)
+    """
+    _ = reload  # noqa
+    # First of all the default for all periods used to be -1. If we encounter this then set periods to ALL_PERIODS
+    period_t = int(period_t)
+    period_d = int(period_d)
+    period_t = ALL_PERIODS if period_t == -1 else period_t
+    period_d = ALL_PERIODS if period_d == -1 else period_d
+
+    print_message('TransmissionCan("' + str(can) + '","' + str(direct) + '")')
+
+    # Get the full file name of the run
+    trans_file_name = find_sans_file(can)
+    direct_file_name = find_sans_file(direct)
+
+    # Set the command
+    trans_command = DataCommand(command_id=DataCommandId.can_transmission, file_name=trans_file_name, period=period_t)
+    direct_command = DataCommand(command_id=DataCommandId.can_direct, file_name=direct_file_name, period=period_d)
+    director.add_command(trans_command)
+    director.add_command(direct_command)
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# N parameter commands
+# ----------------------------------------------------------------------------------------------------------------------
+
+
+# ------------------------
+# Zero parameters
+# ------------------------
+def Clean():
+    """
+    Removes all previous settings.
+    """
+    clean_command = NParameterCommand(command_id=NParameterCommandId.clean, values=[])
+    director.add_command(clean_command)
+
+
+def Set1D():
+    """
+    Sets the reduction dimensionality to 1D
+    """
+    print_message('Set1D()')
+    set_1d_command = NParameterCommand(command_id=NParameterCommandId.reduction_dimensionality,
+                                       values=[ReductionDimensionality.OneDim])
+    director.add_command(set_1d_command)
+
+
+def Set2D():
+    """
+    Sets the reduction dimensionality to 2D
+    """
+    print_message('Set2D()')
+    set_2d_command = NParameterCommand(command_id=NParameterCommandId.reduction_dimensionality,
+                                       values=[ReductionDimensionality.TwoDim])
+    director.add_command(set_2d_command)
+
+
+def UseCompatibilityMode():
+    """
+    Sets the compatibility mode to True
+    """
+    set_2d_command = NParameterCommand(command_id=NParameterCommandId.compatibility_mode,
+                                       values=[True])
+    director.add_command(set_2d_command)
+
+
+# -------------------------
+# Single parameter commands
+# -------------------------
+def MaskFile(file_name):
+    """
+    Loads the user file (note that 'mask file' is the legacy name for the user file)
+
+    @param file_name: path to the user file.
+    """
+    print_message('#Opening "' + file_name + '"')
+
+    # Get the full file path
+    file_name_full = find_full_file_path(file_name)
+    user_file_command = NParameterCommand(command_id=NParameterCommandId.user_file, values=[file_name_full])
+    director.add_command(user_file_command)
+
+
+def Mask(details):
+    """
+    Allows the user to specify a mask command as is done in the user file.
+
+    @param details: a string that specifies masking as it would appear in a mask file
+    """
+    print_message('Mask("' + details + '")')
+    mask_command = NParameterCommand(command_id=NParameterCommandId.mask, values=[details])
+    director.add_command(mask_command)
+
+
+def SetSampleOffset(value):
+    """
+    Set the sample offset.
+
+    @param value: the offset in mm
+    """
+    value = float(value)
+    sample_offset_command = NParameterCommand(command_id=NParameterCommandId.sample_offset, values=[value])
+    director.add_command(sample_offset_command)
+
+
+def Detector(det_name):
+    """
+    Sets the detector which is being used for the reduction.
+
+    Previous comment: Sets the detector bank to use for the reduction e.g. 'front-detector'. The main detector is
+     assumed if this line is not given
+    @param det_name: the detector's name
+    """
+    print_message('Detector("' + det_name + '")')
+    detector_type = convert_bank_name_to_detector_type_isis(det_name)
+    reduction_mode = ISISReductionMode.HAB if detector_type is DetectorType.HAB else ISISReductionMode.LAB
+    detector_command = NParameterCommand(command_id=NParameterCommandId.detector, values=[reduction_mode])
+    director.add_command(detector_command)
+
+
+def SetEventSlices(input_str):
+    """
+    Sets the event slices
+    """
+    event_slices_command = NParameterCommand(command_id=NParameterCommandId.event_slices, values=input_str)
+    director.add_command(event_slices_command)
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Double valued commands
+# ----------------------------------------------------------------------------------------------------------------------
+def SetMonitorSpectrum(specNum, interp=False):
+    """
+    Specifies the spectrum number of the spectrum that will be used for monitor normalisation
+    @param specNum: a spectrum number (1 or greater)
+    @param interp: whether to use interpolation when rebinning the wavelength bins to match the main workspace
+                   (default: no interpolation)
+    """
+    specNum = int(specNum)
+    monitor_spectrum_command = NParameterCommand(command_id=NParameterCommandId.monitor_spectrum, values=[specNum,
+                                                                                                          interp])
+    director.add_command(monitor_spectrum_command)
+
+
+def SetTransSpectrum(specNum, interp=False):
+    """
+    Sets the spectrum number (of the incident monitor) and the interpolation configuration for transmission calculation.
+
+    @param specNum: a spectrum number (1 or greater)
+    @param interp: whether to use interpolation when rebinning the wavelength bins to match the main workspace
+                   (default: no interpolation)
+    """
+    specNum = int(specNum)
+    transmission_spectrum_command = NParameterCommand(command_id=NParameterCommandId.transmission_spectrum,
+                                                      values=[specNum, interp])
+    director.add_command(transmission_spectrum_command)
+
+
+def Gravity(flag, extra_length=0.0):
+    """
+    Allows the user to set the gravity correction for the q conversion.
+    @param flag: set to True if the correction should be used, else False.
+    @param extra_length: the extra length in metres.
+    @return:
+    """
+    extra_length = float(extra_length)
+    print_message('Gravity(' + str(flag) + ', ' + str(extra_length) + ')')
+    gravity_command = NParameterCommand(command_id=NParameterCommandId.gravity, values=[flag, extra_length])
+    director.add_command(gravity_command)
+
+
+def SetDetectorFloodFile(filename, detector_name="REAR"):
+    """
+    Sets the pixel correction file for a particular detector
+
+    @param filename: the name of the file.
+    @param detector_name: the name of the detector
+    """
+    file_name = find_full_file_path(filename)
+    detector_name = convert_bank_name_to_detector_type_isis(detector_name)
+    flood_command = NParameterCommand(command_id=NParameterCommandId.flood_file, values=[file_name, detector_name])
+    director.add_command(flood_command)
+
+
+def SetCorrectionFile(bank, filename):
+    # 10/03/15 RKH, create a new routine that allows change of "direct beam file" = correction file,
+    # for a given detector; this simplifies the iterative process used to adjust it.
+    # Will still have to keep changing the name of the file
+    # for each iteration to avoid Mantid using a cached version, but can then use
+    # only a single user (=mask) file for each set of iterations.
+    # Modelled this on SetDetectorOffsets above ...
+    """
+        @param bank: Must be either 'front' or 'rear' (not case sensitive)
+        @param filename: self explanatory
+    """
+    print_message("SetCorrectionFile(" + str(bank) + ', ' + filename + ')')
+    detector_type = convert_bank_name_to_detector_type_isis(bank)
+    file_name = find_full_file_path(filename)
+    flood_command = NParameterCommand(command_id=NParameterCommandId.wavelength_correction_file,
+                                      values=[file_name, detector_type])
+    director.add_command(flood_command)
+
+
+# --------------------------
+# Three parameter commands
+# ---------------------------
+def SetCentre(xcoord, ycoord, bank='rear'):
+    """
+    Configures the beam centre position. The centre can be set for both detector banks
+    (the low-angle bank and the high-angle bank).
+
+    :param xcoord: X position of beam center in the user coordinate system.
+    :param ycoord: Y position of beam center in the user coordinate system.
+    :param bank: The selected bank ('rear' - low angle or 'front' - high angle)
+    Introduced #5942
+    """
+    xcoord = float(xcoord)
+    ycoord = float(ycoord)
+    print_message('SetCentre(' + str(xcoord) + ', ' + str(ycoord) + ')')
+    detector_type = convert_bank_name_to_detector_type_isis(bank)
+    centre_command = NParameterCommand(command_id=NParameterCommandId.centre, values=[xcoord, ycoord, detector_type])
+    director.add_command(centre_command)
+
+
+def SetPhiLimit(phimin, phimax, use_mirror=True):
+    """
+        Call this function to restrict the analysis to segments of the detector. Phimin and
+        phimax define the limits of the segment where phi=0 is the -x axis and phi=90
+        is the y-axis. Setting use_mirror to True includes a second segment which is the
+        same as the first but rotated by 180 degrees.
+        @param phimin: the minimum phi angle to include
+        @param phimax: the upper limit on phi for the segment
+        @param use_mirror: when True (default) another segment is included, rotated 180 degrees from the first
+    """
+    print_message("SetPhiLimit(" + str(phimin) + ', ' + str(phimax) + ',use_mirror=' + str(use_mirror) + ')')
+    # a beam centre of [0,0,0] makes sense if the detector has been moved such that beam centre is at [0,0,0]
+    phimin = float(phimin)
+    phimax = float(phimax)
+    centre_command = NParameterCommand(command_id=NParameterCommandId.phi_limit, values=[phimin, phimax, use_mirror])
+    director.add_command(centre_command)
+
+
+def set_save(save_algorithms, save_as_zero_error_free):
+    """
+    Mainly internally used by BatchMode. Provides the save settings.
+
+    @param save_algorithms: A list of SaveType enums.
+    @param save_as_zero_error_free: True if a zero error correction should be performed.
+    """
+    save_command = NParameterCommand(command_id=NParameterCommandId.save, values=[save_algorithms,
+                                                                                  save_as_zero_error_free])
+    director.add_command(save_command)
+
+
+# --------------------------
+# Four parameter commands
+# ---------------------------
+def TransFit(mode, lambdamin=None, lambdamax=None, selector='BOTH'):
+    """
+        Sets the fit method to calculate the transmission fit and the wavelength range
+        over which to do the fit. These arguments are passed to the algorithm
+        CalculateTransmission. If mode is set to 'Off' then the unfitted workspace is
+        used and lambdamin and max have no effect
+        @param mode: can be 'Logarithmic' ('YLOG', 'LOG'), 'OFF' ('CLEAR'), 'LINEAR' ('STRAIGHT', 'LIN'),
+                     'POLYNOMIAL2', 'POLYNOMIAL3', ...
+        @param lambdamin: the lowest wavelength to use in any fit
+        @param lambdamax: the end of the fit range
+        @param selector: define for which transmission this fit specification is valid (BOTH, SAMPLE, CAN)
+    """
+    def does_pattern_match(compiled_regex, line):
+        return compiled_regex.match(line) is not None
+
+    def extract_polynomial_order(line):
+        order = re.sub("POLYNOMIAL", "", line)
+        order = order.strip()
+        return int(order)
+
+    polynomial_pattern = re.compile("\\s*" + "POLYNOMIAL" + "\\s*[2-9]")
+    polynomial_order = None
+    # Get the fit mode
+    mode = str(mode).strip().upper()
+
+    if mode == "LINEAR" or mode == "STRAIGHT" or mode == "LIN":
+        fit_type = FitType.Linear
+    elif mode == "LOGARITHMIC" or mode == "LOG" or mode == "YLOG":
+        fit_type = FitType.Log
+    elif does_pattern_match(polynomial_pattern, mode):
+        fit_type = FitType.Polynomial
+        polynomial_order = extract_polynomial_order(mode)
+    else:
+        fit_type = FitType.NoFit
+
+    # Get the selected detector to which the fit settings apply
+    selector = str(selector).strip().upper()
+    if selector == "SAMPLE":
+        fit_data = FitData.Sample
+    elif selector == "CAN":
+        fit_data = FitData.Can
+    elif selector == "BOTH":
+        fit_data = FitData.Both
+    else:
+        raise RuntimeError("TransFit: The selected fit data {0} is not valid. You have to either SAMPLE, "
+                           "CAN or BOTH.".format(selector))
+
+    # Output message
+    message = mode
+    if lambdamin:
+        lambdamin = float(lambdamin)
+        message += ', ' + str(lambdamin)
+    if lambdamax:
+        lambdamax = float(lambdamax)
+        message += ', ' + str(lambdamax)
+    message += ', selector=' + selector
+    print_message("TransFit(\"" + message + "\")")
+
+    # Configure fit settings
+    polynomial_order = polynomial_order if polynomial_order is not None else 0
+    fit_command = NParameterCommand(command_id=NParameterCommandId.centre, values=[fit_data, lambdamin, lambdamax,
+                                                                                   fit_type, polynomial_order])
+    director.add_command(fit_command)
+
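+# For illustration only (hypothetical values): the following would request a third-order polynomial
+# transmission fit between 3 and 9 Angstrom for the sample transmission only:
+#   TransFit('POLYNOMIAL3', 3.0, 9.0, selector='SAMPLE')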
+
+def LimitsR(rmin, rmax, quiet=False, reducer=None):
+    """
+    Sets the radius limits
+
+    @param rmin: minimal radius in mm
+    @param rmax: maximal radius in mm
+    @param quiet: if True then no message will be logged.
+    @param reducer: legacy parameter
+    """
+    _ = reducer  # noqa
+    rmin = float(rmin)
+    rmax = float(rmax)
+    if not quiet:
+        print_message('LimitsR(' + str(rmin) + ', ' + str(rmax) + ')')
+    rmin /= 1000.
+    rmax /= 1000.
+    radius_command = NParameterCommand(command_id=NParameterCommandId.mask_radius, values=[rmin, rmax])
+    director.add_command(radius_command)
+
+
+def LimitsWav(lmin, lmax, step, bin_type):
+    """
+    Set the wavelength limits
+
+    @param lmin: the lower wavelength bound.
+    @param lmax: the upper wavelength bound.
+    @param step: the wavelength step.
+    @param bin_type: the bin type, i.e. linear or logarithmic. Accepted strings are "LINEAR" and "LOGARITHMIC"
+    """
+    lmin = float(lmin)
+    lmax = float(lmax)
+    step = float(step)
+
+    print_message('LimitsWav(' + str(lmin) + ', ' + str(lmax) + ', ' + str(step) + ', ' + bin_type + ')')
+
+    rebin_string = bin_type.strip().upper()
+    rebin_type = RangeStepType.Log if rebin_string == "LOGARITHMIC" else RangeStepType.Lin
+
+    wavelength_command = NParameterCommand(command_id=NParameterCommandId.wavelength_limit,
+                                           values=[lmin, lmax, step, rebin_type])
+    director.add_command(wavelength_command)
+
+
+def LimitsQXY(qmin, qmax, step, type):
+    """
+        To set the bin parameters for the algorithm Qxy()
+        @param qmin: the first Q value to include
+        @param qmax: the last Q value to include
+        @param step: bin width
+        @param type: pass LOG for logarithmic binning
+    """
+    qmin = float(qmin)
+    qmax = float(qmax)
+    step = float(step)
+
+    print_message('LimitsQXY(' + str(qmin) + ', ' + str(qmax) + ', ' + str(step) + ', ' + str(type) + ')')
+    step_type_string = type.strip().upper()
+    if step_type_string == "LOGARITHMIC" or step_type_string == "LOG":
+        step_type = RangeStepType.Log
+    else:
+        step_type = RangeStepType.Lin
+    qxy_command = NParameterCommand(command_id=NParameterCommandId.qxy_limit, values=[qmin, qmax, step, step_type])
+    director.add_command(qxy_command)
+
+
+# --------------------------
+# Six parameter commands
+# --------------------------
+def SetFrontDetRescaleShift(scale=1.0, shift=0.0, fitScale=False, fitShift=False, qMin=None, qMax=None):
+    """
+        Stores properties of the detector which are used to rescale and shift
+        data in the bank after the data have been reduced
+        @param scale: Default to 1.0. Value to multiply data with
+        @param shift: Default to 0.0. Value to add to data
+        @param fitScale: Default is False. Whether or not to try and fit this param
+        @param fitShift: Default is False. Whether or not to try and fit this param
+        @param qMin: When set to None (default) then for fitting use the overlapping q region of
+                     front and rear detectors
+        @param qMax: When set to None (default) then for fitting use the overlapping q region of
+                     front and rear detectors
+    """
+    scale = float(scale)
+    shift = float(shift)
+
+    if qMin:
+        qMin = float(qMin)
+    if qMax:
+        qMax = float(qMax)
+
+    print_message('Set front detector rescale/shift values to {0} and {1}'.format(scale, shift))
+    front_command = NParameterCommand(command_id=NParameterCommandId.front_detector_rescale, values=[scale, shift,
+                                                                                                     fitScale, fitShift,
+                                                                                                     qMin, qMax])
+    director.add_command(front_command)
+
+
+def SetDetectorOffsets(bank, x, y, z, rot, radius, side, xtilt=0.0, ytilt=0.0):
+    """
+        Adjust detector position away from position defined in IDF. On SANS2D the detector
+        banks can be moved around. This method allows fine adjustments of detector bank position
+        in the same way as the DET/CORR userfile command works. Hence please see
+        http://www.mantidproject.org/SANS_User_File_Commands#DET for details.
+
+        The comment below is not true any longer:
+            Note, for now, this command will only have an effect on runs loaded
+            after this command have been executed (because it is when runs are loaded
+            that components are moved away from the positions set in the IDF)
+
+
+        @param bank: Must be either 'front' or 'rear' (not case sensitive)
+        @param x: shift in mm
+        @param y: shift in mm
+        @param z: shift in mm
+        @param rot: shift in degrees
+        @param radius: shift in mm
+        @param side: shift in mm
+        @param xtilt: xtilt in degrees
+        @param ytilt: ytilt in degrees
+    """
+    x = float(x)
+    y = float(y)
+    z = float(z)
+    rot = float(rot)
+    radius = float(radius)
+    side = float(side)
+    xtilt = float(xtilt)
+    ytilt = float(ytilt)
+
+    print_message("SetDetectorOffsets(" + str(bank) + ', ' + str(x)
+                  + ',' + str(y) + ',' + str(z) + ',' + str(rot)
+                  + ',' + str(radius) + ',' + str(side) + ',' + str(xtilt) + ',' + str(ytilt) + ')')
+    detector_type = convert_bank_name_to_detector_type_isis(bank)
+    detector_offsets = NParameterCommand(command_id=NParameterCommandId.detector_offsets, values=[detector_type,
+                                                                                                  x, y, z,
+                                                                                                  rot, radius, side,
+                                                                                                  xtilt, ytilt])
+    director.add_command(detector_offsets)
+
+
+# --------------------------------------------
+# Commands which actually kick off a reduction
+# --------------------------------------------
+def WavRangeReduction(wav_start=None, wav_end=None, full_trans_wav=None, name_suffix=None, combineDet=None,
+                      resetSetup=True, out_fit_settings=None, output_name=None, output_mode=OutputMode.PublishToADS,
+                      use_reduction_mode_as_suffix=False):
+    """
+        Run a reduction from loading the raw data to calculating Q. Its optional arguments allow specific
+        details to be adjusted, and optionally the old setup is reset at the end. Note that if FIT of RESCALE or
+        SHIFT is selected then both the REAR and FRONT detectors are reduced, EXCEPT when only the REAR detector
+        is selected to be reduced.
+
+        @param wav_start: the first wavelength to be in the output data
+        @param wav_end: the last wavelength in the output data
+        @param full_trans_wav: whether to use a wide wavelength range (the instrument's default wavelength range)
+                               for the transmission correction; False by default
+        @param name_suffix: a suffix which is appended to the name of the created output workspace
+        @param combineDet: combineDet can be one of the following:
+                           'rear'                (run one reduction for the 'rear' detector data)
+                           'front'               (run one reduction for the 'front' detector data, and
+                                                  rescale+shift 'front' data)
+                           'both'                (run both the above two reductions)
+                           'merged'              (run the same reductions as 'both' and additionally create
+                                                  a merged data workspace)
+                            None                 (run one reduction for whatever detector has been set as the
+                                                  current detector
+                                                  before running this method. If front apply rescale+shift)
+        @param resetSetup: if true reset setup at the end
+        @param out_fit_settings: An output parameter. It is used, especially when resetSetup is True, in order
+                                 to remember the 'scale and fit' of the fitting algorithm.
+        @param output_name: name of the output workspace/file, if none is specified then one is generated internally.
+        @param output_mode: the way the data should be put out: Can be PublishToADS, SaveToFile or Both
+        @param use_reduction_mode_as_suffix: If true then a second suffix will be used which is
+                                             based on the reduction mode.
+        @return Name of one of the workspaces created
+    """
+    print_message('WavRangeReduction(' + str(wav_start) + ', ' + str(wav_end) + ', ' + str(full_trans_wav) + ')')
+    _ = resetSetup
+    _ = out_fit_settings
+
+    # Set the provided parameters
+    if combineDet is None:
+        reduction_mode = None
+    elif combineDet == 'rear':
+        reduction_mode = ISISReductionMode.LAB
+    elif combineDet == "front":
+        reduction_mode = ISISReductionMode.HAB
+    elif combineDet == "merged":
+        reduction_mode = ISISReductionMode.Merged
+    elif combineDet == "both":
+        reduction_mode = ISISReductionMode.All
+    else:
+        raise RuntimeError("WavRangeReduction: The combineDet input parameter was given a value of {0}. rear, front,"
+                           " both, merged and no input are allowed".format(combineDet))
+
+    if wav_start is not None:
+        wav_start = float(wav_start)
+
+    if wav_end is not None:
+        wav_end = float(wav_end)
+
+    wavelength_command = NParameterCommand(command_id=NParameterCommandId.wavrange_settings,
+                                           values=[wav_start, wav_end, full_trans_wav, reduction_mode])
+    director.add_command(wavelength_command)
+
+    # Save options
+    if output_name is not None:
+        director.add_command(NParameterCommand(command_id=NParameterCommandId.user_specified_output_name,
+                                               values=[output_name]))
+    if name_suffix is not None:
+        director.add_command(NParameterCommand(command_id=NParameterCommandId.user_specified_output_name_suffix,
+                                               values=[name_suffix]))
+    if use_reduction_mode_as_suffix:
+        director.add_command(NParameterCommand(command_id=NParameterCommandId.use_reduction_mode_as_suffix,
+                                               values=[use_reduction_mode_as_suffix]))
+
+    # Get the states
+    state = director.process_commands()
+
+    # Run the reduction
+    batch_alg = SANSBatchReduction()
+    batch_alg(states=[state], use_optimizations=True, output_mode=output_mode)
+
+    # -----------------------------------------------------------
+    # Return the name of the reduced workspace (or WorkspaceGroup)
+    # -----------------------------------------------------------
+    reduction_mode = state.reduction.reduction_mode
+    is_group = is_part_of_reduced_output_workspace_group(state)
+    _, output_workspace_base_name = get_output_name(state, reduction_mode, is_group)
+    return output_workspace_base_name
+
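+# For illustration only (hypothetical run numbers and user file): a minimal scripted reduction might
+# look like this:
+#   UseCompatibilityMode()
+#   LOQ()
+#   MaskFile('USER_FILE.txt')
+#   AssignSample('54431')
+#   TransmissionSample('54435', '54433')
+#   WavRangeReduction(3.0, 9.0, combineDet='rear')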
+
+def BatchReduce(filename, format, plotresults=False, saveAlgs=None, verbose=False,  # noqa
+                centreit=False, reducer=None, combineDet=None, save_as_zero_error_free=False):  # noqa
+    """
+        @param filename: the CSV file with the list of runs to analyse
+        @param format: type of file to load, nxs for Nexus, etc.
+        @param plotresults: if true and this function is run from Mantidplot a graph will be created for the results of each reduction
+        @param saveAlgs: a dictionary mapping save algorithm names to file extensions (default = {'SaveRKH': 'txt'}).
+            Pass several entries to save to multiple file formats
+        @param verbose: set to true to write more information to the log (default=False)
+        @param centreit: do centre finding (default=False)
+        @param reducer: legacy parameter (ignored in SANS v2)
+        @param combineDet: forwarded to WavRangeReduction (rear, front, both, merged, None)
+        @param save_as_zero_error_free: True if a zero error correction should be performed
+        @return final_settings: a dictionary with some values of the reduction - currently (scale, shift)
+    """
+    if saveAlgs is None:
+        saveAlgs = {'SaveRKH': 'txt'}
+
+    # From the old interface
+    _ = format
+    _ = reducer
+    _ = verbose
+
+    if centreit:
+        raise RuntimeError("The beam centre finder is currently not supported.")
+    if plotresults:
+        raise RuntimeError("Plotting the results is currenlty not supported.")
+
+    # Set up the save algorithms
+    save_algs = []
+
+    if saveAlgs:
+        for key, _ in list(saveAlgs.items()):
+            if key == "SaveRKH":
+                save_algs.append(SaveType.RKH)
+            elif key == "SaveNexus":
+                save_algs.append(SaveType.Nexus)
+            elif key == "SaveNistQxy":
+                save_algs.append(SaveType.NistQxy)
+            elif key == "SaveCanSAS" or key == "SaveCanSAS1D":
+                save_algs.append(SaveType.CanSAS)
+            elif key == "SaveCSV":
+                save_algs.append(SaveType.CSV)
+            elif key == "SaveNXcanSAS":
+                save_algs.append(SaveType.NXcanSAS)
+            else:
+                raise RuntimeError("The save format {0} is not known.".format(key))
+        output_mode = OutputMode.Both
+    else:
+        output_mode = OutputMode.PublishToADS
+
+    # Get the information from the csv file
+    batch_csv_parser = BatchCsvParser(filename)
+    parsed_batch_entries = batch_csv_parser.parse_batch_file()
+
+    # Get a state with all existing settings
+    for parsed_batch_entry in parsed_batch_entries:
+        # A new user file. If a new user file is provided then this will overwrite all other settings from
+        # previous entries, otherwise we might have cross-talk between user files.
+        if BatchReductionEntry.UserFile in list(parsed_batch_entry.keys()):
+            user_file = parsed_batch_entry[BatchReductionEntry.UserFile]
+            MaskFile(user_file)
+
+        # Sample scatter
+        sample_scatter = parsed_batch_entry[BatchReductionEntry.SampleScatter]
+        sample_scatter_period = parsed_batch_entry[BatchReductionEntry.SampleScatterPeriod]
+        AssignSample(sample_run=sample_scatter, period=sample_scatter_period)
+
+        # Sample transmission
+        if (BatchReductionEntry.SampleTransmission in list(parsed_batch_entry.keys()) and
+           BatchReductionEntry.SampleDirect in list(parsed_batch_entry.keys())):
+            sample_transmission = parsed_batch_entry[BatchReductionEntry.SampleTransmission]
+            sample_transmission_period = parsed_batch_entry[BatchReductionEntry.SampleTransmissionPeriod]
+            sample_direct = parsed_batch_entry[BatchReductionEntry.SampleDirect]
+            sample_direct_period = parsed_batch_entry[BatchReductionEntry.SampleDirectPeriod]
+            TransmissionSample(sample=sample_transmission, direct=sample_direct,
+                               period_t=sample_transmission_period, period_d=sample_direct_period)
+
+        # Can scatter
+        if BatchReductionEntry.CanScatter in list(parsed_batch_entry.keys()):
+            can_scatter = parsed_batch_entry[BatchReductionEntry.CanScatter]
+            can_scatter_period = parsed_batch_entry[BatchReductionEntry.CanScatterPeriod]
+            AssignCan(can_run=can_scatter, period=can_scatter_period)
+
+        # Can transmission
+        if (BatchReductionEntry.CanTransmission in list(parsed_batch_entry.keys()) and
+           BatchReductionEntry.CanDirect in list(parsed_batch_entry.keys())):
+            can_transmission = parsed_batch_entry[BatchReductionEntry.CanTransmission]
+            can_transmission_period = parsed_batch_entry[BatchReductionEntry.CanTransmissionPeriod]
+            can_direct = parsed_batch_entry[BatchReductionEntry.CanDirect]
+            can_direct_period = parsed_batch_entry[BatchReductionEntry.CanDirectPeriod]
+            TransmissionCan(can=can_transmission, direct=can_direct,
+                            period_t=can_transmission_period, period_d=can_direct_period)
+
+        # Name of the output. We need to modify the name according to the setup of the old reduction mechanism
+        output_name = parsed_batch_entry[BatchReductionEntry.Output]
+
+        # In addition to the output name the user can specify with combineDet an additional suffix (on top of the
+        # suffix that the user can already set -- this existed previously, so we have to provide it)
+        use_reduction_mode_as_suffix = combineDet is not None
+
+        # Apply save options
+        if save_algs:
+            set_save(save_algorithms=save_algs, save_as_zero_error_free=save_as_zero_error_free)
+
+        # Run the reduction for a single batch entry
+        reduced_workspace_name = WavRangeReduction(combineDet=combineDet, output_name=output_name,
+                                                   output_mode=output_mode,
+                                                   use_reduction_mode_as_suffix=use_reduction_mode_as_suffix)
+
+        # Remove the settings which were very specific for this single reduction which are:
+        # 1. The last user file (if any was set)
+        # 2. The last scatter entry
+        # 3. The last scatter transmission and direct entry (if any were set)
+        # 4. The last can scatter (if any was set)
+        # 5. The last can transmission and direct entry (if any were set)
+        if BatchReductionEntry.UserFile in list(parsed_batch_entry.keys()):
+            director.remove_last_user_file()
+        director.remove_last_scatter_sample()
+
+        if (BatchReductionEntry.SampleTransmission in list(parsed_batch_entry.keys()) and
+            BatchReductionEntry.SampleDirect in list(parsed_batch_entry.keys())):  # noqa
+            director.remove_last_sample_transmission_and_direct()
+
+        if BatchReductionEntry.CanScatter in list(parsed_batch_entry.keys()):
+            director.remove_last_scatter_can()
+
+        if (BatchReductionEntry.CanTransmission in list(parsed_batch_entry.keys()) and
+           BatchReductionEntry.CanDirect in list(parsed_batch_entry.keys())):
+            director.remove_last_can_transmission_and_direct()
+
+        # Plot the results if requested; the flag value 1 comes from the old interface.
+        if plotresults == 1:
+            if AnalysisDataService.doesExist(reduced_workspace_name):
+                workspace = AnalysisDataService.retrieve(reduced_workspace_name)
+                if isinstance(workspace, WorkspaceGroup):
+                    for ws in workspace:
+                        PlotResult(ws.getName())
+                else:
+                    PlotResult(workspace.getName())
+
+
+def CompWavRanges(wavelens, plot=True, combineDet=None, resetSetup=True):
+    """
+        Compares the momentum transfer results calculated from different wavelength ranges. Given
+        the list of wave ranges [a, b, c] it reduces for wavelengths a-b, b-c and a-c.
+        @param wavelens: the list of wavelength ranges
+        @param plot: set this to true to plot the result (must be run in Mantid), default is true
+        @param combineDet: see description in WavRangeReduction
+        @param resetSetup: if true reset setup at the end
+    """
+
+    print_message('CompWavRanges( %s,plot=%s)' % (str(wavelens), plot))
+
+    if not isinstance(wavelens, list) or len(wavelens) < 2:
+        if not isinstance(wavelens, tuple):
+            raise RuntimeError(
+                'Error CompWavRanges() requires a list of wavelengths between which '
+                'reductions will be performed.')
+
+    # Perform a reduction over the full wavelength range which was specified
+    reduced_workspace_names = []
+
+    for index in range(len(wavelens)):
+        wavelens[index] = float(wavelens[index])
+
+    full_reduction_name = WavRangeReduction(wav_start=wavelens[0], wav_end=wavelens[- 1],
+                                            combineDet=combineDet, resetSetup=False)
+    reduced_workspace_names.append(full_reduction_name)
+
+    # Reduce each wavelength slice
+    for i in range(0, len(wavelens) - 1):
+        reduced_workspace_name = WavRangeReduction(wav_start=wavelens[i], wav_end=wavelens[i + 1],
+                                                   combineDet=combineDet, resetSetup=False)
+        reduced_workspace_names.append(reduced_workspace_name)
+
+    if plot and mantidplot:
+        mantidplot.plotSpectrum(reduced_workspace_names, 0)
+
+    # Return just the workspace name of the full range
+    return reduced_workspace_names[0]
+
+
+def PhiRanges(phis, plot=True):
+    """
+        Given a list of phi ranges [a, b, c, d] it reduces in the phi ranges a-b and c-d
+        @param phis: the list of phi ranges
+        @param plot: set this to true to plot the result (must be run in Mantid), default is true
+    """
+
+    print_message('PhiRanges( %s,plot=%s)' % (str(phis), plot))
+
+    # TODO: convert the string input into a Python list
+
+    if len(phis) % 2 != 0:
+        raise RuntimeError('Phi ranges must be given as pairs')
+
+    reduced_workspace_names = []
+    for i in range(0, len(phis), 2):
+        SetPhiLimit(phis[i], phis[i + 1])
+        reduced_workspace_name = WavRangeReduction()
+        reduced_workspace_names.append(reduced_workspace_name)
+
+    if plot and mantidplot:
+        mantidplot.plotSpectrum(reduced_workspace_names, 0)
+
+    # Return just the first of the reduced workspace names
+    return reduced_workspace_names[0]
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# General commands
+# ----------------------------------------------------------------------------------------------------------------------
+def PlotResult(workspace, canvas=None):
+    """
+        Draws a graph of the passed workspace. If the workspace is 2D (has many spectra)
+        a contour plot is written.
+        @param workspace: a workspace name or handle to plot
+        @param canvas: optional handle to an existing graph to write the plot to
+        @return: a handle to the graph that was written to
+    """
+    try:
+        import mantidplot
+        workspace = AnalysisDataService.retrieve(str(workspace))
+        number_of_spectra = workspace[0].getNumberHistograms() if isinstance(workspace, WorkspaceGroup) else\
+            workspace.getNumberHistograms()
+        graph = mantidplot.plotSpectrum(workspace, 0) if number_of_spectra == 1 else \
+            mantidplot.importMatrixWorkspace(workspace.getName()).plotGraph2D()
+
+        if canvas is not None:
+            # we were given a handle to an existing graph, use it
+            mantidplot.mergePlots(canvas, graph)
+            graph = canvas
+        return graph
+    except ImportError:
+        print_message('Plot functions are not available, is this being run from outside Mantidplot?')
+
+
+def AddRuns(runs, instrument='sans2d', saveAsEvent=False, binning="Monitors", isOverlay=False, time_shifts=None,
+            defType='.nxs', rawTypes=('.raw', '.s*', 'add', '.RAW'), lowMem=False):
+    '''
+    Method to expose the add_runs functionality for custom scripting.
+    @param runs: a list with the requested run numbers
+    @param instrument: the name of the selected instrument
+    @param saveAsEvent: when adding event-type data, this allows the result to be stored as event-type data
+    @param binning: where to get the binnings from. This is relevant when adding Event-type data.
+                    The property can be set to "Monitors" in order to emulate the binning of the monitors or to a
+                    string list with the same format that is used for the Rebin algorithm. This property is ignored
+                    when saving as event data.
+    @param isOverlay: sets if the overlay mechanism should be used when the saveAsEvent flag is set
+    @param time_shifts: provides additional time shifts if the isOverlay flag is specified. The time shifts are specified
+                        as a string list. Either time_shifts is not used or it is a list with times in seconds. Note that
+                        there has to be one entry fewer than the number of workspaces to add.
+    @param defType: the file type
+    @param rawTypes: the raw types
+    @param lowMem: if the lowMem option should be used
+    @returns a success message
+    '''
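+    # Example (hypothetical run numbers): AddRuns(['5512', '5513'], saveAsEvent=True,
+    #                                             isOverlay=True, time_shifts=['77'])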
+    # Need at least two runs to work
+    if len(runs) < 2:
+        print_message("AddRuns issue: A list with at least two runs needs to be provided.")
+        return
+
+    if time_shifts is None:
+        time_shifts = []
+
+    return add_runs(runs=runs,
+                    inst=instrument,
+                    defType=defType,
+                    rawTypes=rawTypes,
+                    lowMem=lowMem,
+                    binning=binning,
+                    saveAsEvent=saveAsEvent,
+                    isOverlay=isOverlay,
+                    time_shifts=time_shifts)
diff --git a/scripts/SANS/sans/command_interface/__init__.py b/scripts/SANS/sans/command_interface/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/scripts/SANS/sans/command_interface/batch_csv_file_parser.py b/scripts/SANS/sans/command_interface/batch_csv_file_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e0f8b1ecf208cfd2dd1557a55864ada22db1a06
--- /dev/null
+++ b/scripts/SANS/sans/command_interface/batch_csv_file_parser.py
@@ -0,0 +1,158 @@
+from __future__ import (absolute_import, division, print_function)
+import re
+from csv import reader
+from sans.common.enums import BatchReductionEntry
+from sans.common.file_information import find_full_file_path
+from sans.common.constants import ALL_PERIODS
+
+
+class BatchCsvParser(object):
+    batch_file_keywords = {"sample_sans": BatchReductionEntry.SampleScatter,
+                           "output_as": BatchReductionEntry.Output,
+                           "sample_trans": BatchReductionEntry.SampleTransmission,
+                           "sample_direct_beam": BatchReductionEntry.SampleDirect,
+                           "can_sans": BatchReductionEntry.CanScatter,
+                           "can_trans": BatchReductionEntry.CanTransmission,
+                           "can_direct_beam": BatchReductionEntry.CanDirect,
+                           "user_file": BatchReductionEntry.UserFile}
+    batch_file_keywords_which_are_dropped = {"background_sans": None,
+                                             "background_trans": None,
+                                             "background_direct_beam": None}
+
+    data_keys = {BatchReductionEntry.SampleScatter: BatchReductionEntry.SampleScatterPeriod,
+                 BatchReductionEntry.SampleTransmission: BatchReductionEntry.SampleTransmissionPeriod,
+                 BatchReductionEntry.SampleDirect: BatchReductionEntry.SampleDirectPeriod,
+                 BatchReductionEntry.CanScatter: BatchReductionEntry.CanScatterPeriod,
+                 BatchReductionEntry.CanTransmission: BatchReductionEntry.CanTransmissionPeriod,
+                 BatchReductionEntry.CanDirect: BatchReductionEntry.CanDirectPeriod}
+
+    def __init__(self, batch_file_name):
+        super(BatchCsvParser, self).__init__()
+        # Get the full file path
+        self._batch_file_name = find_full_file_path(batch_file_name)
+        if not self._batch_file_name:
+            raise RuntimeError("batch_csv_file_parser: Could not find specified batch file. Make sure it is available"
+                               "in the Mantid path settings.")
+
+    def parse_batch_file(self):
+        """
+        Parses the batch csv file and returns the elements in a parsed form
+
+        Returns: parsed csv elements
+        """
+
+        parsed_rows = []
+
+        with open(self._batch_file_name, 'r') as csvfile:
+            batch_reader = reader(csvfile, delimiter=",")
+            row_number = 0
+            for row in batch_reader:
+                # Check if the row is empty
+                if not row:
+                    continue
+
+                # If the first element contains the MANTID_BATCH_FILE marker then ignore the row
+                if "MANTID_BATCH_FILE" in row[0]:
+                    continue
+
+                # Else we perform a parse of the row
+                parsed_row = self._parse_row(row, row_number)
+                parsed_rows.append(parsed_row)
+                row_number += 1
+        return parsed_rows
+
+    def _parse_row(self, row, row_number):
+        # Clean all elements of the row
+        row = list(map(str.strip, row))
+
+        # Go sequentially through the row with a stride of two. The user can either omit entries or leave
+        # them blank, ie ... , sample_direct_beam, , can_sans, XXXXX, ...  or even ..., , ,...
+        # This means we expect an even length of entries
+        if len(row) % 2 != 0:
+            raise RuntimeError("We expect an even number of entries, but row {0} has {1} entries.".format(row_number,
+                                                                                                          len(row)))
+        output = {}
+        # Special attention has to go to the specification of the period in a run number. The user can
+        # specify something like 5512p7 for sample scatter, which means run number 5512 with period 7.
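+        # Example row: "sample_sans,5512p7,output_as,run5512" is parsed into
+        # {BatchReductionEntry.SampleScatter: "5512", BatchReductionEntry.SampleScatterPeriod: 7,
+        #  BatchReductionEntry.Output: "run5512"}.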
+        for key, value in zip(row[::2], row[1::2]):
+            if key in list(BatchCsvParser.batch_file_keywords.keys()):
+                new_key = BatchCsvParser.batch_file_keywords[key]
+                value = value.strip()
+                if BatchCsvParser._is_data_entry(new_key):
+                    run_number, period, period_key = BatchCsvParser._get_run_number_and_period(new_key, value)
+                    output.update({new_key: run_number})
+                    output.update({period_key: period})
+                else:
+                    output.update({new_key: value})
+            elif key in list(self.batch_file_keywords_which_are_dropped.keys()):
+                continue
+            else:
+                raise RuntimeError("The key {0} is not part of the SANS batch csv file keywords".format(key))
+
+        # Ensure that sample_scatter was set
+        if BatchReductionEntry.SampleScatter not in output or not output[BatchReductionEntry.SampleScatter]:
+            raise RuntimeError("The sample_scatter entry in row {0} seems to be missing.".format(row_number))
+
+        # Ensure that output_as was set
+        if BatchReductionEntry.Output not in output or not output[BatchReductionEntry.Output]:
+            raise RuntimeError("The output_as entry in row {0} seems to be missing.".format(row_number))
+
+        # Ensure that the transmission data for the sample is specified either completely or not at all.
+        has_sample_transmission = BatchReductionEntry.SampleTransmission in output and \
+                                  output[BatchReductionEntry.SampleTransmission]  # noqa
+        has_sample_direct_beam = BatchReductionEntry.SampleDirect in output and output[BatchReductionEntry.SampleDirect]
+
+        if (not has_sample_transmission and has_sample_direct_beam) or \
+                (has_sample_transmission and not has_sample_direct_beam):
+            raise RuntimeError("Inconsistent sample transmission settings in row {0}. Either both the transmission "
+                               "and the direct beam run are set or none.".format(row_number))
+
+        # Ensure that the transmission data for the can is specified either completely or not at all.
+        has_can_transmission = BatchReductionEntry.CanTransmission in output and \
+                               output[BatchReductionEntry.CanTransmission]  # noqa
+        has_can_direct_beam = BatchReductionEntry.CanDirect in output and output[BatchReductionEntry.CanDirect]
+
+        if (not has_can_transmission and has_can_direct_beam) or \
+                (has_can_transmission and not has_can_direct_beam):
+            raise RuntimeError("Inconsistent can transmission settings in row {0}. Either both the transmission "
+                               "and the direct beam run are set or none.".format(row_number))
+
+        # Ensure that can scatter is specified if the transmissions are set
+        has_can_scatter = BatchReductionEntry.CanScatter in output and output[BatchReductionEntry.CanScatter]
+        if not has_can_scatter and has_can_transmission:
+            raise RuntimeError("The can transmission was set but not the scatter file in row {0}.".format(row_number))
+        return output
+
+    @staticmethod
+    def _is_data_entry(entry):
+        data_entry_keys = list(BatchCsvParser.data_keys.keys())
+        for data_enum in data_entry_keys:
+            if entry is data_enum:
+                return True
+        return False
+
+    @staticmethod
+    def _get_run_number_and_period(data_type, entry):
+        """
+        Gets the run number and the period from a csv data entry.
+
+        @param data_type: the type of data entry, e.g. BatchReductionEntry.SampleScatter
+        @param entry: a data entry, e.g. 5512 or 5512p7
+        @return: the run number, the period selection and the corresponding key word
+        """
+        data_period_type = BatchCsvParser.data_keys[data_type]
+
+        # Slice off period if it exists. If it does not exist, then the period is ALL_PERIODS
+        period_pattern = "[p,P][0-9]$"
+
+        has_period = re.search(period_pattern, entry)
+
+        period = ALL_PERIODS
+        run_number = entry
+        if has_period:
+            run_number = re.sub(period_pattern, "", entry)
+            period_partial = re.sub(run_number, "", entry)
+            period = re.sub("[p,P]", "", period_partial)
+            period = int(period)
+
+        return run_number, period, data_period_type
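+
+# Minimal usage sketch (illustrative; "sans_batch.csv" is a hypothetical file name which must be
+# findable via the Mantid data search directories):
+#   parser = BatchCsvParser("sans_batch.csv")
+#   parsed_rows = parser.parse_batch_file()  # a list of {BatchReductionEntry: value} dictionaries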
diff --git a/scripts/SANS/sans/command_interface/command_interface_functions.py b/scripts/SANS/sans/command_interface/command_interface_functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..d38084b9ab244f730e6f2e1723782af2b41c3d9c
--- /dev/null
+++ b/scripts/SANS/sans/command_interface/command_interface_functions.py
@@ -0,0 +1,18 @@
+from __future__ import (absolute_import, division, print_function)
+from mantid.kernel import Logger
+
+
+VERBOSE = False
+sans_log = Logger("SANS")
+
+
+# Print a message and log it if logging is requested and the VERBOSE flag is set
+def print_message(message, log=True, no_console=False):
+    if log and VERBOSE:
+        sans_log.notice(message)
+    if not no_console:
+        print(message)
+
+
+def warning_message(message):
+    sans_log.warning(message)
diff --git a/scripts/SANS/sans/command_interface/command_interface_state_director.py b/scripts/SANS/sans/command_interface/command_interface_state_director.py
new file mode 100644
index 0000000000000000000000000000000000000000..931172674aa076b1f1bbf227f063ca1af86d761e
--- /dev/null
+++ b/scripts/SANS/sans/command_interface/command_interface_state_director.py
@@ -0,0 +1,630 @@
+from __future__ import (absolute_import, division, print_function)
+from sans.common.enums import (serializable_enum, DataType)
+from sans.user_file.user_file_state_director import UserFileStateDirectorISIS
+from sans.state.data import get_data_builder
+from sans.user_file.user_file_parser import (UserFileParser)
+from sans.user_file.user_file_reader import (UserFileReader)
+from sans.user_file.user_file_common import (MonId, monitor_spectrum, OtherId, SampleId, GravityId, SetId, position_entry,
+                                             fit_general, FitId, monitor_file, mask_angle_entry, LimitsId, range_entry,
+                                             simple_range, DetectorId, event_binning_string_values, det_fit_range,
+                                             single_entry_with_detector)
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Commands
+# ----------------------------------------------------------------------------------------------------------------------
+
+
+# ------------------
+# IDs for commands. We use serializable_enum here since enum is not available in the current Python configuration.
+# ------------------
+@serializable_enum("sample_scatter", "sample_transmission", "sample_direct", "can_scatter", "can_transmission",
+                   "can_direct")
+class DataCommandId(object):
+    pass
+
+
+@serializable_enum("clean", "reduction_dimensionality", "compatibility_mode",  # Null Parameter commands
+                   "user_file", "mask", "sample_offset", "detector", "event_slices",  # Single parameter commands
+                   "flood_file", "wavelength_correction_file",  # Single parameter commands
+                   "user_specified_output_name", "user_specified_output_name_suffix",  # Single parameter commands
+                   "use_reduction_mode_as_suffix",  # Single parameter commands
+                   "incident_spectrum", "gravity",  # Double parameter commands
+                   "centre", "save",   # Three parameter commands
+                   "trans_fit", "phi_limit", "mask_radius", "wavelength_limit", "qxy_limit",  # Four parameter commands
+                   "wavrange_settings",  # Five parameter commands
+                   "front_detector_rescale",  # Six parameter commands
+                   "detector_offsets"  # Nine parameter commands
+                   )
+class NParameterCommandId(object):
+    pass
+
+
+class Command(object):
+    def __init__(self, command_id):
+        super(Command, self).__init__()
+        self.command_id = command_id
+
+
+class DataCommand(Command):
+    """
+    A command which is associated with setting data information.
+    """
+    def __init__(self, command_id, file_name, period=None):
+        super(DataCommand, self).__init__(command_id)
+        self.file_name = file_name
+        self.period = period
+
+
+class NParameterCommand(Command):
+    """
+    A command which has n parameters in a list.
+    """
+    def __init__(self, command_id, values):
+        super(NParameterCommand, self).__init__(command_id)
+        self.values = values
+
+
+class FitData(object):
+    """
+    Describes the fit mode. This is not part of the SANSType module since we only need it here. It is slightly
+    inconsistent but it is very localized.
+    """
+    class Sample(object):
+        pass
+
+    class Can(object):
+        pass
+
+    class Both(object):
+        pass
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Command Interface State Director
+
+# Explanation of the implementation
+#
+# Previously the ISISCommandInterface just executed commands one after another. Settings were stored in the reduction
+# singleton. Once in a while the reduction singleton was reset.
+#
+# Here we need to have a state director which builds the SANS state out of these legacy commands. Note that before we
+# can process any of the commands we need to find the data entries, since they drive the reduction.
+# All other commands should be setting the SANSState in order.
+# ----------------------------------------------------------------------------------------------------------------------
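+#
+# A minimal usage sketch (illustrative only; the file names are hypothetical and the facility argument is
+# expected to be an enum value such as SANSFacility.ISIS from sans.common.enums):
+#
+#   director = CommandInterfaceStateDirector(SANSFacility.ISIS)
+#   director.add_command(DataCommand(DataCommandId.sample_scatter, "SANS2D00022024"))
+#   director.add_command(NParameterCommand(NParameterCommandId.user_file, ["USER_FILE.txt"]))
+#   state = director.process_commands()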
+class CommandInterfaceStateDirector(object):
+    def __init__(self, facility):
+        super(CommandInterfaceStateDirector, self).__init__()
+        self._commands = []
+        self._user_file_state_director = None
+
+        self._processed_state_settings = {}
+
+        self._facility = facility
+        self._method_map = None
+        self._set_up_method_map()
+
+    def add_command(self, command):
+        self._commands.append(command)
+
+    def clear_commands(self):
+        self._commands = []
+        self._processed_state_settings = {}
+
+    def process_commands(self):
+        """
+        Here we process the commands that have been set. This would be triggered by a command which requests a reduction.
+
+        The execution strategy is:
+        1. Find the data entries and create a SANSStateData object out of them
+        2. Go sequentially through the commands in a FIFO manner (except for the data entries)
+        3. Delete the processed state settings. We only need to retain the commands. If we also retained the
+           processed state settings then we would populate some entries twice.
+        4. Return the constructed state
+        @returns a valid SANSState object which can be used for data reductions, or raises an exception.
+        """
+        # 1. Get a SANSStateData object.
+        data_state = self._get_data_state()
+
+        # 2. Process the command queue
+        state = self._process_command_queue(data_state)
+
+        # 3. Leave the commands in place but clear the processed state settings, else they will be reused.
+        self._processed_state_settings = {}
+
+        # 4. Provide the state
+        return state
+
+    def get_commands(self):
+        return self._commands
+
+    def _get_data_state(self):
+        # Get the data commands
+        data_commands = self._get_data_commands()
+
+        # Build the state data
+        data_builder = get_data_builder(self._facility)
+        self._set_data_element(data_builder.set_sample_scatter, data_builder.set_sample_scatter_period,
+                               DataCommandId.sample_scatter, data_commands)
+        self._set_data_element(data_builder.set_sample_transmission, data_builder.set_sample_transmission_period,
+                               DataCommandId.sample_transmission, data_commands)
+        self._set_data_element(data_builder.set_sample_direct, data_builder.set_sample_direct_period,
+                               DataCommandId.sample_direct, data_commands)
+        self._set_data_element(data_builder.set_can_scatter, data_builder.set_can_scatter_period,
+                               DataCommandId.can_scatter, data_commands)
+        self._set_data_element(data_builder.set_can_transmission, data_builder.set_can_transmission_period,
+                               DataCommandId.can_transmission, data_commands)
+        self._set_data_element(data_builder.set_can_direct, data_builder.set_can_direct_period,
+                               DataCommandId.can_direct, data_commands)
+
+        return data_builder.build()
+
+    def _get_data_commands(self):
+        """
+        Grabs the data commands from the command queue.
+
+        @return: a list of data commands
+        """
+        # Grab the data commands
+        data_commands = [element for element in self._commands if isinstance(element, DataCommand)]
+        return data_commands
+
+    def _set_data_element(self, data_builder_file_setter, data_builder_period_setter, command_id, commands):
+        """
+        Sets a data element (e.g. sample scatter file and sample scatter period) on the data builder.
+
+        @param data_builder_file_setter: a handle to the correct setter for the file on the data builder.
+        @param data_builder_period_setter: a handle to the correct setter for the period on the data builder.
+        @param command_id: the command id
+        @param commands: a list of commands.
+        """
+        data_elements = self._get_elements_with_key(command_id, commands)
+
+        # If there is no element, then there is nothing to do
+        if len(data_elements) == 0:
+            return
+
+        # If there is more than one element, then we are only interested in the last element. The user could
+        # have overridden an earlier setting, e.g.
+        # ...
+        # AssignSample('SANS2D1234')
+        # ...
+        # AssignSample('SANS2D4321')
+        # ...
+        # We select therefore the last element
+        data_element = data_elements[-1]
+        file_name = data_element.file_name
+        period = data_element.period
+        data_builder_file_setter(file_name)
+        data_builder_period_setter(period)
+
+    @staticmethod
+    def _get_elements_with_key(command_id, command_list):
+        """
+        Get all elements in the command list with a certain id
+
+        @param command_id: the id of the command.
+        @param command_list: a list of commands.
+        @return: a list of commands which match the id.
+        """
+        return [element for element in command_list if element.command_id is command_id]
+
+    def _process_command_queue(self, data_state):
+        """
+        Process the command queue sequentially as a FIFO structure.
+
+        @param data_state: the data state.
+        @return: a SANSState object.
+        """
+        self._user_file_state_director = UserFileStateDirectorISIS(data_state)
+
+        # If there is a clean instruction in the queue, then remove all commands up to and including it
+        self._apply_clean_if_required()
+
+        # Evaluate all commands which adds them to the _processed_state_settings dictionary,
+        # except for DataCommands which we deal with separately
+        for command in self._commands:
+            if isinstance(command, DataCommand):
+                continue
+            command_id = command.command_id
+            process_function = self._method_map[command_id]
+            process_function(command)
+
+        # Hand the processed settings to the user file state director and construct the state
+        self._user_file_state_director.add_state_settings(self._processed_state_settings)
+        return self._user_file_state_director.construct()
+
+    def _set_up_method_map(self):
+        """
+        Sets up a mapping between command ids and the processing methods which can handle each command.
+        """
+        self._method_map = {NParameterCommandId.user_file: self._process_user_file,
+                            NParameterCommandId.mask: self._process_mask,
+                            NParameterCommandId.incident_spectrum: self._process_incident_spectrum,
+                            NParameterCommandId.clean: self._process_clean,
+                            NParameterCommandId.reduction_dimensionality: self._process_reduction_dimensionality,
+                            NParameterCommandId.sample_offset: self._process_sample_offset,
+                            NParameterCommandId.detector: self._process_detector,
+                            NParameterCommandId.gravity: self._process_gravity,
+                            NParameterCommandId.centre: self._process_centre,
+                            NParameterCommandId.trans_fit: self._process_trans_fit,
+                            NParameterCommandId.front_detector_rescale: self._process_front_detector_rescale,
+                            NParameterCommandId.event_slices: self._process_event_slices,
+                            NParameterCommandId.flood_file: self._process_flood_file,
+                            NParameterCommandId.phi_limit: self._process_phi_limit,
+                            NParameterCommandId.wavelength_correction_file: self._process_wavelength_correction_file,
+                            NParameterCommandId.mask_radius: self._process_mask_radius,
+                            NParameterCommandId.wavelength_limit: self._process_wavelength_limit,
+                            NParameterCommandId.qxy_limit: self._process_qxy_limit,
+                            NParameterCommandId.wavrange_settings: self._process_wavrange,
+                            NParameterCommandId.compatibility_mode: self._process_compatibility_mode,
+                            NParameterCommandId.detector_offsets: self._process_detector_offsets,
+                            NParameterCommandId.save: self._process_save,
+                            NParameterCommandId.user_specified_output_name: self._process_user_specified_output_name,
+                            NParameterCommandId.user_specified_output_name_suffix:
+                                self._process_user_specified_output_name_suffix,
+                            NParameterCommandId.use_reduction_mode_as_suffix:
+                                self._process_use_reduction_mode_as_suffix
+                            }
+
+    def add_to_processed_state_settings(self, new_state_settings, treat_list_as_element=False):
+        """
+        Adds the new entries to the already processed state settings
+
+        @param new_state_settings: a dictionary with new entries for the processed state settings
+        @param treat_list_as_element: if true and we add a list for the first time, then we treat it as a single
+                                      element. For example, for the setting [1, 2] the new state entry would be
+                                      [[1, 2]] and not [1, 2]. With a further entry it could become [[1, 2], [3, 4]].
+        """
+        for key, value in list(new_state_settings.items()):
+            # Add the new entry
+            # 1. A similar entry can already exist, then append it (or extend it)
+            # 2. The entry does not exist, but it is in the form of a list (you would get that, for example, when
+            #    dealing with input from the UserFileReader)
+            # 3. The entry does not exist and is not in a list. In this case we need to add it to a list.
+            if key in self._processed_state_settings:
+                # If the key already exists then we have to be careful. We have the current value V = [A, B, ...]
+                # and our new element N
+                # i. If the existing entries (ie A, ...) are not lists and N is not a list, then append to V.
+                # ii. If the existing entries (ie A, ...) are not lists and N is a list then extend V.
+                # iii. If the existing entries (ie A, ...) are lists and N is a list then append to V.
+                # iv. If the existing entries (ie A, ...) are lists and N is not a list, then raise
+                # The reason we have to be careful is that an N coming from a user file always arrives
+                # in the form of a list.
+                old_values = self._processed_state_settings[key]
+                is_old_first_entry_a_list = isinstance(old_values[0], list)
+                is_new_entry_a_list = isinstance(value, list)
+
+                if not is_old_first_entry_a_list and not is_new_entry_a_list:
+                    old_values.append(value)
+                elif not is_old_first_entry_a_list and is_new_entry_a_list:
+                    old_values.extend(value)
+                elif is_old_first_entry_a_list and is_new_entry_a_list:
+                    old_values.append(value)
+                else:
+                    raise RuntimeError("CommandInterfaceStateDirector: Trying to insert {0} which is a list into {0} "
+                                       "which is collection of non-list elements".format(value, old_values))
+            elif isinstance(value, list) and treat_list_as_element:
+                self._processed_state_settings.update({key: [value]})
+            elif isinstance(value, list):
+                self._processed_state_settings.update({key: value})
+            else:
+                self._processed_state_settings.update({key: [value]})
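+    # Illustrative behaviour (K stands for an arbitrary settings key):
+    #   adding {K: 1} and then {K: 2}                       ->  {K: [1, 2]}
+    #   adding {K: [1, 2]} for the first time               ->  {K: [1, 2]}
+    #   adding {K: [1, 2]} with treat_list_as_element=True  ->  {K: [[1, 2]]}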
+
+    def _process_user_file(self, command):
+        """
+        Processes a user file and retains the parsed tags.
+
+        @param command: the command with the user file path
+        """
+        file_name = command.values[0]
+        user_file_reader = UserFileReader(file_name)
+        new_state_entries = user_file_reader.read_user_file()
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_mask(self, command):
+        """
+        We need to process a mask line as specified in the user file.
+        """
+        mask_command = command.values[0]
+        # Use the user file parser to extract the values from the user file setting.
+        user_file_parser = UserFileParser()
+        parsed_output = user_file_parser.parse_line(mask_command)
+        self.add_to_processed_state_settings(parsed_output)
+
+    def _process_incident_spectrum(self, command):
+        incident_monitor = command.values[0]
+        interpolate = command.values[1]
+        is_trans = command.values[2]
+        new_state_entries = {MonId.spectrum: monitor_spectrum(spectrum=incident_monitor,
+                                                              is_trans=is_trans,
+                                                              interpolate=interpolate)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _apply_clean_if_required(self):
+        """
+        This removes all commands up to and including the last clean command.
+
+        We have to do this clean before we start processing the elements.
+        """
+        index_first_clean_command = None
+        for index in reversed(list(range(0, len(self._commands)))):
+            element = self._commands[index]
+            if element.command_id == NParameterCommandId.clean:
+                index_first_clean_command = index
+                break
+        if index_first_clean_command is not None:
+            del(self._commands[0:(index_first_clean_command + 1)])
+            self._processed_state_settings = {}
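+    # Example: a command queue whose ids are [user_file, mask, clean, sample_offset] is trimmed
+    # to just the sample_offset command, since everything up to and including the last clean
+    # command is removed.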
+
+    def _process_clean(self, command):
+        _ = command  # noqa
+        raise RuntimeError("Trying the process a Clean command. The clean command should have removed itself and "
+                           "all previous commands. If it is still here, then this is a bug")
+
+    def _process_reduction_dimensionality(self, command):
+        reduction_dimensionality = command.values[0]
+        new_state_entries = {OtherId.reduction_dimensionality: reduction_dimensionality}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_sample_offset(self, command):
+        sample_offset = command.values[0]
+        new_state_entries = {SampleId.offset: sample_offset}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_detector(self, command):
+        reduction_mode = command.values[0]
+        new_state_entries = {DetectorId.reduction_mode: reduction_mode}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_gravity(self, command):
+        use_gravity = command.values[0]
+        extra_length = command.values[1]
+        new_state_entries = {GravityId.on_off: use_gravity,
+                             GravityId.extra_length: extra_length}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_centre(self, command):
+        pos1 = command.values[0]
+        pos2 = command.values[1]
+        detector_type = command.values[2]
+        new_state_entries = {SetId.centre: position_entry(pos1=pos1, pos2=pos2, detector_type=detector_type)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_trans_fit(self, command):
+        def fit_type_to_data_type(fit_type_to_convert):
+            return DataType.Can if fit_type_to_convert is FitData.Can else DataType.Sample
+
+        fit_data = command.values[0]
+        wavelength_low = command.values[1]
+        wavelength_high = command.values[2]
+        fit_type = command.values[3]
+        polynomial_order = command.values[4]
+        if fit_data is FitData.Both:
+            data_to_fit = [FitData.Sample, FitData.Can]
+        else:
+            data_to_fit = [fit_data]
+
+        # Add each fit entry separately; updating a single dictionary would overwrite the sample
+        # entry with the can entry when FitData.Both is requested.
+        for element in data_to_fit:
+            data_type = fit_type_to_data_type(element)
+            new_state_entries = {FitId.general: fit_general(start=wavelength_low, stop=wavelength_high,
+                                                            fit_type=fit_type, data_type=data_type,
+                                                            polynomial_order=polynomial_order)}
+            self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_front_detector_rescale(self, command):
+        scale = command.values[0]
+        shift = command.values[1]
+        fit_scale = command.values[2]
+        fit_shift = command.values[3]
+        q_min = command.values[4]
+        q_max = command.values[5]
+
+        # Set the scale and the shift
+        new_state_entries = {DetectorId.rescale: scale, DetectorId.shift: shift}
+
+        # Set the fit for the scale
+        new_state_entries.update({DetectorId.rescale_fit: det_fit_range(start=q_min, stop=q_max, use_fit=fit_scale)})
+
+        # Set the fit for shift
+        new_state_entries.update({DetectorId.shift_fit: det_fit_range(start=q_min, stop=q_max, use_fit=fit_shift)})
+
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_event_slices(self, command):
+        event_slice_value = command.values
+        new_state_entries = {OtherId.event_slices: event_binning_string_values(value=event_slice_value)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_flood_file(self, command):
+        file_path = command.values[0]
+        detector_type = command.values[1]
+        new_state_entries = {MonId.flat: monitor_file(file_path=file_path, detector_type=detector_type)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_phi_limit(self, command):
+        phi_min = command.values[0]
+        phi_max = command.values[1]
+        use_phi_mirror = command.values[2]
+        new_state_entries = {LimitsId.angle: mask_angle_entry(min=phi_min, max=phi_max, use_mirror=use_phi_mirror)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_wavelength_correction_file(self, command):
+        file_path = command.values[0]
+        detector_type = command.values[1]
+        new_state_entries = {MonId.direct: monitor_file(file_path=file_path, detector_type=detector_type)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_mask_radius(self, command):
+        radius_min = command.values[0]
+        radius_max = command.values[1]
+        new_state_entries = {LimitsId.radius: range_entry(start=radius_min, stop=radius_max)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_wavelength_limit(self, command):
+        wavelength_low = command.values[0]
+        wavelength_high = command.values[1]
+        wavelength_step = command.values[2]
+        wavelength_step_type = command.values[3]
+        new_state_entries = {LimitsId.wavelength: simple_range(start=wavelength_low, stop=wavelength_high,
+                                                               step=wavelength_step, step_type=wavelength_step_type)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_wavrange(self, command):
+        wavelength_low = command.values[0]
+        wavelength_high = command.values[1]
+        full_wavelength_range = command.values[2]
+        reduction_mode = command.values[3]
+
+        # Update the lower and the upper wavelength values. Note that this is considered an incomplete setting, since
+        # no step or step type has been specified. This means we need to update one of the processed commands, which
+        # is not nice but the command interface forces us to do so. We take a copy of the last LimitsId.wavelength
+        # entry and then change the desired settings. This means it has to be set at this point, else
+        # something is wrong.
+        if LimitsId.wavelength in self._processed_state_settings:
+            last_entry = self._processed_state_settings[LimitsId.wavelength][-1]
+
+            new_wavelength_low = wavelength_low if wavelength_low is not None else last_entry.start
+            new_wavelength_high = wavelength_high if wavelength_high is not None else last_entry.stop
+            new_range = simple_range(start=new_wavelength_low, stop=new_wavelength_high, step=last_entry.step,
+                                     step_type=last_entry.step_type)
+
+            if wavelength_low is not None or wavelength_high is not None:
+                copied_entry = {LimitsId.wavelength: new_range}
+                self.add_to_processed_state_settings(copied_entry)
+        else:
+            raise RuntimeError("CommandInterfaceStateDirector: Setting the lower and upper wavelength bounds is not"
+                               " possible. We require also a step and step range")
+
+        if full_wavelength_range is not None:
+            full_wavelength_range_entry = {OtherId.use_full_wavelength_range: full_wavelength_range}
+            self.add_to_processed_state_settings(full_wavelength_range_entry)
+
+        if reduction_mode is not None:
+            reduction_mode_entry = {DetectorId.reduction_mode: reduction_mode}
+            self.add_to_processed_state_settings(reduction_mode_entry)
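+    # Example: if the last processed LimitsId.wavelength entry has start=2.0, stop=14.0 and
+    # step=0.125, and a wavrange command supplies 4.0 and 10.0, then a new entry with
+    # start=4.0, stop=10.0 and the unchanged step settings is appended.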
+
+    def _process_qxy_limit(self, command):
+        q_min = command.values[0]
+        q_max = command.values[1]
+        q_step = command.values[2]
+        q_step_type = command.values[3]
+        new_state_entries = {LimitsId.qxy: simple_range(start=q_min, stop=q_max, step=q_step, step_type=q_step_type)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_compatibility_mode(self, command):
+        use_compatibility_mode = command.values[0]
+        new_state_entries = {OtherId.use_compatibility_mode: use_compatibility_mode}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_detector_offsets(self, command):
+        detector_type = command.values[0]
+        x = command.values[1]
+        y = command.values[2]
+        z = command.values[3]
+        rotation = command.values[4]
+        radius = command.values[5]
+        side = command.values[6]
+        x_tilt = command.values[7]
+        y_tilt = command.values[8]
+
+        # Set the offsets
+        new_state_entries = {DetectorId.correction_x: single_entry_with_detector(entry=x, detector_type=detector_type),
+                             DetectorId.correction_y: single_entry_with_detector(entry=y, detector_type=detector_type),
+                             DetectorId.correction_z: single_entry_with_detector(entry=z, detector_type=detector_type),
+                             DetectorId.correction_rotation:
+                                 single_entry_with_detector(entry=rotation, detector_type=detector_type),
+                             DetectorId.correction_radius:
+                                 single_entry_with_detector(entry=radius, detector_type=detector_type),
+                             DetectorId.correction_translation:
+                                 single_entry_with_detector(entry=side, detector_type=detector_type),
+                             DetectorId.correction_x_tilt:
+                                 single_entry_with_detector(entry=x_tilt, detector_type=detector_type),
+                             DetectorId.correction_y_tilt:
+                                 single_entry_with_detector(entry=y_tilt, detector_type=detector_type),
+                             }
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_save(self, command):
+        save_algorithms = command.values[0]
+        save_as_zero_error_free = command.values[1]
+        new_state_entries = {OtherId.save_types: save_algorithms,
+                             OtherId.save_as_zero_error_free: save_as_zero_error_free}
+        self.add_to_processed_state_settings(new_state_entries,  treat_list_as_element=True)
+
+    def _process_user_specified_output_name(self, command):
+        user_specified_output_name = command.values[0]
+        new_state_entry = {OtherId.user_specified_output_name: user_specified_output_name}
+        self.add_to_processed_state_settings(new_state_entry)
+
+    def _process_user_specified_output_name_suffix(self, command):
+        user_specified_output_name_suffix = command.values[0]
+        new_state_entry = {OtherId.user_specified_output_name_suffix: user_specified_output_name_suffix}
+        self.add_to_processed_state_settings(new_state_entry)
+
+    def _process_use_reduction_mode_as_suffix(self, command):
+        use_reduction_mode_as_suffix = command.values[0]
+        new_state_entry = {OtherId.use_reduction_mode_as_suffix: use_reduction_mode_as_suffix}
+        self.add_to_processed_state_settings(new_state_entry)
+
+    def remove_last_user_file(self):
+        """
+        Removes the last added user file from the commands.
+
+        See _remove_last_element for further explanation.
+        """
+        self._remove_last_element(NParameterCommandId.user_file)
+
+    def remove_last_scatter_sample(self):
+        """
+        Removes the last added scatter sample from the commands.
+
+        See _remove_last_element for further explanation.
+        """
+        self._remove_last_element(DataCommandId.sample_scatter)
+
+    def remove_last_sample_transmission_and_direct(self):
+        """
+        Removes the last added scatter transmission and direct from the commands.
+
+        See _remove_last_element for further explanation.
+        """
+        self._remove_last_element(DataCommandId.sample_transmission)
+        self._remove_last_element(DataCommandId.sample_direct)
+
+    def remove_last_scatter_can(self):
+        """
+        Removes the last added scatter can from the commands.
+
+        See _remove_last_element for further explanation.
+        """
+        self._remove_last_element(DataCommandId.can_scatter)
+
+    def remove_last_can_transmission_and_direct(self):
+        """
+        Removes the last added can transmission and direct from the commands.
+
+        See _remove_last_element for further explanation.
+        """
+        self._remove_last_element(DataCommandId.can_transmission)
+        self._remove_last_element(DataCommandId.can_direct)
+
+    def _remove_last_element(self, command_id):
+        """
+        Removes the last instance of a command associated with the command_id.
+
+        This method is vital for batch reduction: after each row of a batch file has been reduced, the
+        row-specific settings (user file, scatter, transmission and direct entries) are removed again so
+        that they do not leak into the reduction of the next row.
+        @param command_id: the command_id of the command whose last instance we want to remove
+        """
+        index_to_remove = None
+        for index, element in reversed(list(enumerate(self._commands))):
+            if element.command_id == command_id:
+                index_to_remove = index
+                break
+        if index_to_remove is not None:
+            del(self._commands[index_to_remove])
+        else:
+            raise RuntimeError("Tried to delete the last instance of {0}, but none was present in the list of "
+                               "commands".format(command_id))
diff --git a/scripts/SANS/sans/common/enums.py b/scripts/SANS/sans/common/enums.py
index 30a7ba73dc4d013b1dda97cd3c9b97395cf39e53..35821dbe081f9e4a2983f453e6e5626c71ce68a9 100644
--- a/scripts/SANS/sans/common/enums.py
+++ b/scripts/SANS/sans/common/enums.py
@@ -46,8 +46,8 @@ def string_convertible(cls):
                @serializable_enum
                class MyClass(object):
                 ...
-    @param cls: a reference to the class
-    @return: the class
+    :param cls: a reference to the class
+    :return: the class
     """
     def to_string(elements, convert_to_string):
         for key, value in list(elements.items()):
diff --git a/scripts/SANS/sans/common/file_information.py b/scripts/SANS/sans/common/file_information.py
index 10c22c97eb5edf2961d4fc34b78ea7cecee74289..19725adef267b0c3a1bbe9016f34ccb5dc9c757b 100644
--- a/scripts/SANS/sans/common/file_information.py
+++ b/scripts/SANS/sans/common/file_information.py
@@ -249,8 +249,8 @@ def convert_to_shape(shape_flag):
     """
     Converts a shape flag to a shape object.
 
-    @param shape_flag: a geometry flag which can be 1, 2 or 3
-    @return: a shape object
+    :param shape_flag: a geometry flag which can be 1, 2 or 3
+    :return: a shape object
     """
     if shape_flag == 1:
         shape = SampleShape.CylinderAxisUp
@@ -374,8 +374,8 @@ def get_geometry_information_isis_nexus(file_name):
     """
     Gets geometry information from the sample folder in the nexus file
 
-    @param file_name:
-    @return: height, width, thickness, shape
+    :param file_name:
+    :return: height, width, thickness, shape
     """
     with h5.File(file_name) as h5_file:
         # Open first entry
@@ -407,7 +407,6 @@ def get_geometry_information_isis_nexus(file_name):
 #    file where the first level entry will be named mantid_workspace_X where X=1,2,3,... . Note that the numbers
 #    correspond  to periods.
 # 3. Scenario 2: Added event data, ie files which were added and saved as event data.
-# 3.1 TODO
 
 
 def get_date_and_run_number_added_nexus(file_name):
@@ -564,8 +563,8 @@ def get_geometry_information_isis_added_nexus(file_name):
     """
     Gets geometry information from the sample folder in an added nexus file
 
-    @param file_name: the file name
-    @return: height, width, thickness, shape
+    :param file_name: the file name
+    :return: height, width, thickness, shape
     """
     with h5.File(file_name) as h5_file:
         # Open first entry
@@ -704,8 +703,8 @@ def get_geometry_information_raw(file_name):
     """
     Gets the geometry information form the table workspace with the spb information
 
-    @param file_name: the full file name to an existing raw file.
-    @return: height, width, thickness and shape
+    :param file_name: the full file name to an existing raw file.
+    :return: height, width, thickness and shape
     """
     alg_info = AlgorithmManager.createUnmanaged("RawFileInfo")
     alg_info.initialize()
diff --git a/scripts/SANS/sans/common/general_functions.py b/scripts/SANS/sans/common/general_functions.py
index 34f1640f43b83e9684162e32d5a452be227e9af0..01bdbef05278ea96f5e821eb2cd647e56faf0682 100644
--- a/scripts/SANS/sans/common/general_functions.py
+++ b/scripts/SANS/sans/common/general_functions.py
@@ -23,10 +23,10 @@ def get_log_value(run, log_name, log_type):
 
     There are two options here. Either the log is a scalar or a vector. In the case of a scalar there is not much
     left to do. In the case of a vector we select the first element whose time_stamp is after the start time of the run
-    @param run: a Run object.
-    @param log_name: the name of the log entry
-    @param log_type: the expected type fo the log entry
-    @return: the log entry
+    :param run: a Run object.
+    :param log_name: the name of the log entry
+    :param log_type: the expected type of the log entry
+    :return: the log entry
     """
     try:
         # Scalar case
@@ -122,10 +122,10 @@ def create_child_algorithm(parent_alg, name, **kwargs):
     """
     Creates a child algorithm from a parent algorithm
 
-    @param parent_alg: a handle to the parent algorithm
-    @param name: the name of the child algorithm
-    @param kwargs: a argument dict
-    @return: the child algorithm
+    :param parent_alg: a handle to the parent algorithm
+    :param name: the name of the child algorithm
+    :param kwargs: an argument dict
+    :return: the child algorithm
     """
     if parent_alg:
         alg = parent_alg.createChildAlgorithm(name)
@@ -142,8 +142,8 @@ def get_input_workspace_as_copy_if_not_same_as_output_workspace(alg):
     This function checks if the input workspace is the same as the output workspace, if so then it returns the
     workspace else it creates a copy of the input in order for it to be consumed.
 
-    @param alg: a handle to the algorithm which has a InputWorkspace property and a OutputWorkspace property
-    @return: a workspace
+    :param alg: a handle to the algorithm which has a InputWorkspace property and a OutputWorkspace property
+    :return: a workspace
     """
     def _clone_input(_ws):
         clone_name = "CloneWorkspace"
@@ -255,7 +255,7 @@ def get_ads_workspace_references():
     """
     Gets a list of handles of available workspaces on the ADS
 
-    @return: the workspaces on the ADS.
+    :return: the workspaces on the ADS.
     """
     for workspace_name in AnalysisDataService.getObjectNames():
         yield AnalysisDataService.retrieve(workspace_name)
@@ -274,8 +274,8 @@ def convert_bank_name_to_detector_type_isis(detector_name):
             but also allowed main
     LARMOR: DetectorBench      -> LAB
 
-    @param detector_name: a string with a valid detector name
-    @return: a detector type depending on the input string, or a runtime exception.
+    :param detector_name: a string with a valid detector name
+    :return: a detector type depending on the input string, or a runtime exception.
     """
     detector_name = detector_name.upper()
     detector_name = detector_name.strip()
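As a minimal illustration of the name-to-type mapping the docstring describes, a sketch restricted to the LARMOR entry that is visible above (the real function knows the other instruments' bank names as well):

def sketch_bank_name_to_detector_type(detector_name):
    # Normalise as the real function does: upper case and strip whitespace.
    detector_name = detector_name.upper().strip()
    known_names = {"DETECTORBENCH": "LAB"}  # LARMOR mapping quoted in the docstring
    if detector_name not in known_names:
        raise RuntimeError("Unknown detector name: {0}".format(detector_name))
    return known_names[detector_name]

# sketch_bank_name_to_detector_type(" DetectorBench ")  -> "LAB"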
@@ -300,8 +300,8 @@ def is_part_of_reduced_output_workspace_group(state):
 
     Note: that this is a hacky solution to for the return value of WavRangeReduction in ISISCommandInterface.
           Improve this!!! (Maybe by getting rid of the return value of WavRangeReduction)
-    @param state: a state object.
-    @return: True if the reduced output is a workspace group else false
+    :param state: a state object.
+    :return: True if the reduced output is a workspace group else false
     """
     # 1. Multi-period input
     data_info = state.data
@@ -426,11 +426,11 @@ def get_bins_for_rebin_setting(min_value, max_value, step_value, step_type):
     """
     Creates a list of bins for the rebin setting.
 
-    @param min_value: the minimum value
-    @param max_value: the maximum value
-    @param step_value: the step value
-    @param step_type: the step type, ie if linear or logarithmic
-    @return: a list of bin values
+    :param min_value: the minimum value
+    :param max_value: the maximum value
+    :param step_value: the step value
+    :param step_type: the step type, i.e. linear or logarithmic
+    :return: a list of bin values
     """
     lower_bound = min_value
     bins = []
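Since the docstring distinguishes linear and logarithmic steps, a standalone sketch of the two stepping rules may help. It assumes Rebin-style semantics (linear steps add step_value, logarithmic steps multiply the previous edge by 1 + step_value) and is not the SANS implementation:

def sketch_bins(min_value, max_value, step_value, is_linear):
    # Build bin edges from min_value to max_value using the chosen stepping rule,
    # clamping the last edge to max_value.
    edges = [min_value]
    while edges[-1] < max_value:
        last = edges[-1]
        next_edge = last + step_value if is_linear else last * (1.0 + step_value)
        edges.append(min(next_edge, max_value))
    return edges

# sketch_bins(1.0, 2.0, 0.25, True)   -> [1.0, 1.25, 1.5, 1.75, 2.0]
# sketch_bins(1.0, 2.0, 0.25, False)  -> [1.0, 1.25, 1.5625, 1.953125, 2.0]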
@@ -462,11 +462,11 @@ def get_ranges_for_rebin_setting(min_value, max_value, step_value, step_type):
     """
     Creates two lists of lower and upper bounds for the
 
-    @param min_value: the minimum value
-    @param max_value: the maximum value
-    @param step_value: the step value
+    :param step_type: the step type, i.e. linear or logarithmic
-    @return: two ranges lists, one for the lower and one for the upper bounds.
+    :param min_value: the minimum value
+    :param max_value: the maximum value
+    :param step_value: the step value
+    :param step_type: the step type, ie if linear or logarithmic
+    :return: two ranges lists, one for the lower and one for the upper bounds.
     """
     bins = get_bins_for_rebin_setting(min_value, max_value, step_value, step_type)
     return get_range_lists_from_bin_list(bins)
@@ -476,8 +476,8 @@ def get_ranges_for_rebin_array(rebin_array):
     """
     Converts a rebin string into min, step (+ step_type), max
 
-    @param rebin_array: a simple rebin array, ie min, step, max
-    @return: two ranges lists, one for the lower and one for the upper bounds.
+    :param rebin_array: a simple rebin array, i.e. min, step, max
+    :return: two ranges lists, one for the lower and one for the upper bounds.
     """
     min_value = rebin_array[0]
     step_value = rebin_array[1]
@@ -633,8 +633,8 @@ def get_base_name_from_multi_period_name(workspace_name):
     """
     Gets a base name from a multiperiod name. The multiperiod name is NAME_xxx and the base name is NAME
 
-    @param workspace_name: a workspace name string
-    @return: the base name
+    :param workspace_name: a workspace name string
+    :return: the base name
     """
     multi_period_workspace_form = "_[0-9]+$"
     if re.search(multi_period_workspace_form, workspace_name) is not None:
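A short, self-contained illustration of the suffix handling the docstring describes (the workspace names are made up; only the regular expression is taken from the code above):

import re

multi_period_workspace_form = "_[0-9]+$"
# A name with a multi-period suffix reduces to its base name ...
assert re.sub(multi_period_workspace_form, "", "example_workspace_3") == "example_workspace"
# ... while a name without the suffix is left untouched.
assert re.search(multi_period_workspace_form, "example_workspace") is None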
@@ -650,8 +650,8 @@ def sanitise_instrument_name(instrument_name):
 
     Unfortunately the instrument names are sometimes truncated or extended. This is possible since they are strings
     and not types.
-    @param instrument_name: a instrument name string
-    @return: a sanitises instrument name string
+    :param instrument_name: an instrument name string
+    :return: a sanitised instrument name string
     """
     instrument_name_upper = instrument_name.upper()
     if re.search(LOQ, instrument_name_upper):
@@ -761,8 +761,8 @@ def does_can_workspace_exist_on_ads(can_workspace):
     """
     Checks if a can workspace already exists on the ADS, based on the stored hash
 
-    @param can_workspace: a handle to the can workspace
-    @return: True if the workspace exists on the ADS else False
+    :param can_workspace: a handle to the can workspace
+    :return: True if the workspace exists on the ADS else False
     """
     if not has_tag(REDUCED_CAN_TAG, can_workspace):
         return False
diff --git a/scripts/SANS/sans/common/xml_parsing.py b/scripts/SANS/sans/common/xml_parsing.py
index 5367f4c26154bbe7251c0ddd91e449652710b2eb..c2c924e765bf03b10d5cef7c00ebf1d3c9dfcda9 100644
--- a/scripts/SANS/sans/common/xml_parsing.py
+++ b/scripts/SANS/sans/common/xml_parsing.py
@@ -30,15 +30,19 @@ def get_named_elements_from_ipf_file(ipf_file, names_to_search, value_type):
     """
     output = {}
     number_of_elements_to_search = len(names_to_search)
+
     for _, element in eTree.iterparse(ipf_file):
         if element.tag == "parameter" and "name" in list(element.keys()):
-            if element.get("name") in names_to_search:
+            # Ideally we would break out of the loop once all the requested elements have been found.
+            # BUT: an eTree.iterparse generator that is not run to completion emits a ResourceWarning, so we cannot
+            # break early. There is also no way to close the file manually, hence we run through the whole file.
+            # Note that there is an existing bug report here: https://bugs.python.org/issue25707
+            if number_of_elements_to_search != len(output) and element.get("name") in names_to_search:
                 sub_element = element.find("value")
                 value = sub_element.get("val")
                 output.update({element.get("name"): value_type(value)})
                 element.clear()
-                if number_of_elements_to_search == len(output):
-                    break
+
     return output
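A self-contained sketch of the pattern adopted above: the iterparse generator is always run to exhaustion (so no ResourceWarning is emitted) and matched elements are cleared once consumed. The XML below is a toy stand-in for an IPF file, not a real instrument parameter file:

import io
import xml.etree.ElementTree as eTree

ipf_like = io.BytesIO(
    b'<root>'
    b'<parameter name="low-angle-detector-name"><value val="rear-detector"/></parameter>'
    b'<parameter name="something-else"><value val="42"/></parameter>'
    b'</root>')

names_to_search = {"low-angle-detector-name"}
output = {}
for _, element in eTree.iterparse(ipf_like):
    # Never break out of the loop, even once everything has been found.
    if element.tag == "parameter" and element.get("name") in names_to_search:
        output[element.get("name")] = element.find("value").get("val")
        element.clear()
# output == {'low-angle-detector-name': 'rear-detector'}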
 
 
diff --git a/scripts/SANS/sans/sans_batch.py b/scripts/SANS/sans/sans_batch.py
new file mode 100644
index 0000000000000000000000000000000000000000..d363a3af68e3a14d6f2b5f1e56cb28ffe4b48ad7
--- /dev/null
+++ b/scripts/SANS/sans/sans_batch.py
@@ -0,0 +1,69 @@
+# pylint: disable=invalid-name
+""" SANBatchReduction algorithm is the starting point for any new type reduction, event single reduction"""
+from __future__ import (absolute_import, division, print_function)
+from sans.state.state import State
+from sans.algorithm_detail.batch_execution import (single_reduction_for_batch)
+from sans.common.enums import (OutputMode)
+
+
+class SANSBatchReduction(object):
+    def __init__(self):
+        super(SANSBatchReduction, self).__init__()
+
+    def __call__(self, states, use_optimizations=True, output_mode=OutputMode.PublishToADS):
+        """
+        This is the start of any reduction.
+
+        :param states: This is a list of sans states. Each state in the list corresponds to a single reduction.
+        :param use_optimizations: if True then the optimizations for file reloading are used.
+        :param output_mode: The output mode defines how the reduced data should be published. This can be
+                            1. PublishToADS
+                            2. SaveToFile
+                            3. Both
+        """
+        self.validate_inputs(states, use_optimizations, output_mode)
+
+        self._execute(states, use_optimizations, output_mode)
+
+    @staticmethod
+    def _execute(states, use_optimizations, output_mode):
+        # Iterate over each state, load the data and perform the reduction
+        for state in states:
+            single_reduction_for_batch(state, use_optimizations, output_mode)
+
+    def validate_inputs(self, states, use_optimizations, output_mode):
+        # We are strict about the types here.
+        # 1. states has to be a list of sans state objects
+        # 2. use_optimizations has to be bool
+        # 3. output_mode has to be an OutputMode enum
+        if not isinstance(states, list):
+            raise RuntimeError("The provided states are not in a list. They have to be in a list.")
+
+        for state in states:
+            if not isinstance(state, State):
+                raise RuntimeError("The entries have to be sans state objects. "
+                                   "The provided type is {0}".format(type(state)))
+
+        if not isinstance(use_optimizations, bool):
+            raise RuntimeError("The optimization has to be a boolean. The provided type is"
+                               " {0}".format(type(use_optimizations)))
+
+        if output_mode is not OutputMode.PublishToADS and output_mode is not OutputMode.SaveToFile and\
+                        output_mode is not OutputMode.Both:  # noqa
+            raise RuntimeError("The output mode has to be an enum of type OutputMode. The provided type is"
+                               " {0}".format(type(output_mode)))
+
+        errors = self._validate_inputs(states)
+        if errors:
+            raise RuntimeError("The provided states are not valid: {}".format(errors))
+
+    @staticmethod
+    def _validate_inputs(states):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        try:
+            for state in states:
+                state.validate()
+        except ValueError as err:
+            errors.update({"SANSBatchReduction": str(err)})
+        return errors
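A hedged usage sketch of the new entry point defined above; constructing valid State objects is deliberately left out, since that happens elsewhere (e.g. via the user file state director):

from sans.common.enums import OutputMode
from sans.sans_batch import SANSBatchReduction


def run_batch(states):
    # `states` is assumed to be a list of already-populated and valid sans State objects.
    batch_reduction = SANSBatchReduction()
    batch_reduction(states, use_optimizations=True, output_mode=OutputMode.PublishToADS)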
diff --git a/scripts/SANS/sans/state/automatic_setters.py b/scripts/SANS/sans/state/automatic_setters.py
index 13444919704dba00d79bfdd78bd0b576c8e52058..7ce7d9c3467a435064c20a59cbc255a77aa85793 100644
--- a/scripts/SANS/sans/state/automatic_setters.py
+++ b/scripts/SANS/sans/state/automatic_setters.py
@@ -132,8 +132,8 @@ def set_up_setter_forwarding_from_director_to_builder(director, builder_name):
 
     The method will look for any set_XXX method in the builder and add an equivalent method set_builder_XXX which is
     forwarded to set_XXX.
-    @param director: a director object
-    @param builder_name: the name of the builder on the director
+    :param director: a director object
+    :param builder_name: the name of the builder on the director
     """
     set_tag = "set"
     builder_instance = getattr(director, builder_name)
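The forwarding described in the docstring boils down to generating set_builder_XXX wrappers with setattr. A minimal standalone sketch of that pattern (illustrative only, not the SANS implementation):

class _Builder(object):
    def __init__(self):
        self.height = None

    def set_height(self, value):
        self.height = value


class _Director(object):
    def __init__(self):
        self.builder = _Builder()


def forward_setters(director, builder_name):
    # For every set_XXX method on the builder, add a set_builder_XXX wrapper to the director.
    builder_instance = getattr(director, builder_name)
    for attribute in dir(builder_instance):
        if attribute.startswith("set_"):
            forwarded = getattr(builder_instance, attribute)
            setattr(director, attribute.replace("set_", "set_builder_", 1), forwarded)


director = _Director()
forward_setters(director, "builder")
director.set_builder_height(42.0)
assert director.builder.height == 42.0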
diff --git a/scripts/SANS/sans/state/move.py b/scripts/SANS/sans/state/move.py
index edf9878dbe556d089526fceb04a9a1356e565dac..936029815fdaeb6ce77dc06fe177ea65fc5f2afb 100644
--- a/scripts/SANS/sans/state/move.py
+++ b/scripts/SANS/sans/state/move.py
@@ -223,7 +223,8 @@ class StateMoveSANS2DBuilder(object):
     def __init__(self, data_info):
         super(StateMoveSANS2DBuilder, self).__init__()
         self.state = StateMoveSANS2D()
-        # TODO Automate this
+        # TODO: At the moment we set the monitor names up manually here. In principle all the necessary information
+        #       is in the IDF, so we should be able to parse it from there.
         invalid_monitor_names = ["monitor5", "monitor6", "monitor7", "monitor8"]
         setup_idf_and_ipf_content(self.state, data_info, invalid_monitor_names=invalid_monitor_names)
 
@@ -245,7 +246,8 @@ class StateMoveLARMORBuilder(object):
         self.state = StateMoveLARMOR()
         # There are several invalid monitor names which are not setup for LARMOR, also the IPF has a high-angle-bank
         # but this is not setup for LARMOR
-        # TODO Automate this
+        # TODO: At the moment we set the monitor names up manually here. In principle all the necessary information
+        #       is in the IDF, so we should be able to parse it from there.
         invalid_monitor_names = ["monitor6", "monitor7", "monitor8", "monitor9", "monitor10"]
         invalid_detector_types = [DetectorType.HAB]
         setup_idf_and_ipf_content(self.state, data_info,
diff --git a/scripts/SANS/sans/state/state_base.py b/scripts/SANS/sans/state/state_base.py
index c8c370622c3fc79591d5bcdfff165b20d1024f90..b67508d8cd0326f9bcf242c1d5e53ebb23c5b329 100644
--- a/scripts/SANS/sans/state/state_base.py
+++ b/scripts/SANS/sans/state/state_base.py
@@ -31,11 +31,11 @@ def all_list_elements_are_of_specific_type_and_not_empty(value, comparison_type,
     """
     Ensures that all elements of a list are of a specific type and that the list is not empty
 
-    @param value: the list to check
-    @param comparison_type: the expected type of the elements of the list.
-    @param additional_comparison: additional comparison lambda.
-    @param type_check: the method which performs type checking.
-    @return: True if the list is not empty and all types are as expected, else False.
+    :param value: the list to check
+    :param comparison_type: the expected type of the elements of the list.
+    :param additional_comparison: additional comparison lambda.
+    :param type_check: the method which performs type checking.
+    :return: True if the list is not empty and all types are as expected, else False.
     """
     is_of_type = True
     for element in value:
@@ -442,8 +442,8 @@ def convert_state_to_dict(instance):
     """
     Converts the state object to a dictionary.
 
-    @param instance: the instance which is to be converted
-    @return: a serialized state object in the form of a dict
+    :param instance: the instance which is to be converted
+    :return: a serialized state object in the form of a dict
     """
     descriptor_values, descriptor_types = get_descriptor_values(instance)
     # Add the descriptors to a dict
@@ -490,8 +490,8 @@ def set_state_from_property_manager(instance, property_manager):
     """
     Set the State object from the information stored on a property manager object. This is the deserialization step.
 
-    @param instance: the instance which is to be set with a values of the propery manager
-    @param property_manager: the property manager withe the stored setting
+    :param instance: the instance which is to be set with the values of the property manager
+    :param property_manager: the property manager with the stored settings
     """
     def _set_element(inst, k_element, v_element):
         if k_element != STATE_NAME and k_element != STATE_MODULE:
diff --git a/scripts/SANS/sans/state/state_functions.py b/scripts/SANS/sans/state/state_functions.py
index b6c94edf77cbe51d0a0a553f52e1d2e9891a4bfd..4703c51eec75e57a848b7fb25c17faded07746c3 100644
--- a/scripts/SANS/sans/state/state_functions.py
+++ b/scripts/SANS/sans/state/state_functions.py
@@ -13,8 +13,8 @@ def is_pure_none_or_not_none(elements_to_check):
     """
     Checks a list of elements contains None entries and non-None entries
 
-    @param elements_to_check: a list with entries to check
-    @return: True if the list contains either only None or only non-None elements, else False
+    :param elements_to_check: a list with entries to check
+    :return: True if the list contains either only None or only non-None elements, else False
     """
     are_all_none_or_all_not_none = True
 
@@ -28,9 +28,9 @@ def is_not_none_and_first_larger_than_second(elements_to_check):
     """
     This function checks if both are not none and then checks if the first element is smaller than the second element.
 
-    @param elements_to_check: a list with two entries. The first is the lower bound and the second entry is the upper
+    :param elements_to_check: a list with two entries. The first is the lower bound and the second entry is the upper
                               bound
-    @return: False if at least one input is None or if both are not None and the first element is smaller than the
+    :return: False if at least one input is None or if both are not None and the first element is smaller than the
              second else True
     """
     is_invalid = True
@@ -52,10 +52,10 @@ def validation_message(error_message, instruction, variables):
     """
     Generates a validation message for the SANSState.
 
-    @param error_message: A message describing the error.
-    @param instruction: A message describing what to do to fix the error
-    @param variables: A dictionary which contains the variable names and values which are involved in the error.
-    @return: a formatted validation message string.
+    :param error_message: A message describing the error.
+    :param instruction: A message describing what to do to fix the error
+    :param variables: A dictionary which contains the variable names and values which are involved in the error.
+    :return: a formatted validation message string.
     """
     message = ""
     for key, value in sorted(variables.items()):
diff --git a/scripts/SANS/sans/state/wavelength_and_pixel_adjustment.py b/scripts/SANS/sans/state/wavelength_and_pixel_adjustment.py
index 97e4755041a99fa35ffa0f714e52c870eab12a7b..a735869b97cee92455faf8b71e1cd21a34028fa4 100644
--- a/scripts/SANS/sans/state/wavelength_and_pixel_adjustment.py
+++ b/scripts/SANS/sans/state/wavelength_and_pixel_adjustment.py
@@ -25,7 +25,8 @@ class StateAdjustmentFiles(StateBase):
 
     def validate(self):
         is_invalid = {}
-        # TODO if a file was specified then make sure that its existence is checked.
+        # TODO: It would be nice to have a typed parameter for files which checks whether the specified file exists.
+        #       This is very low priority.
 
         if is_invalid:
             raise ValueError("StateAdjustmentFiles: The provided inputs are illegal. "
diff --git a/scripts/SANS/sans/user_file/user_file_state_director.py b/scripts/SANS/sans/user_file/user_file_state_director.py
index e20c22dd61bbe6f501afa82710073f383f98c194..1a63fd526729540a173ecd52b8eab4c9029153aa 100644
--- a/scripts/SANS/sans/user_file/user_file_state_director.py
+++ b/scripts/SANS/sans/user_file/user_file_state_director.py
@@ -142,11 +142,11 @@ def set_single_entry(builder, method_name, tag, all_entries, apply_to_value=None
     Sets a single element on the specified builder via a specified method name.
 
     If several entries were specified by the user, then the last entry is specified and the
-    @param builder: a builder object
-    @param method_name: a method on the builder object
-    @param tag: the tag of an entry which is potentially part of all_entries
-    @param all_entries: all parsed entries
-    @param apply_to_value: a function which should be applied before setting the value. If it is None, then nothing
+    :param builder: a builder object
+    :param method_name: a method on the builder object
+    :param tag: the tag of an entry which is potentially part of all_entries
+    :param all_entries: all parsed entries
+    :param apply_to_value: a function which should be applied before setting the value. If it is None, then nothing
                            happens
     """
     if tag in all_entries:
@@ -228,7 +228,7 @@ class UserFileStateDirectorISIS(object):
         This allows for a usage of the UserFileStateDirector with externally provided user_file_items or internally
         via the set_user_file method.
 
-        @param user_file_items: a list of parsed user file items.
+        :param user_file_items: a list of parsed user file items.
         """
         # ----------------------------------------------------
         # Populate the different sub states from the user file
@@ -1272,8 +1272,8 @@ class UserFileStateDirectorISIS(object):
         """
         Performs a conversion of position 1 of the beam centre. This is forwarded to the move builder.
 
-        @param pos1: the first position (this can be x in mm or for LARMOR and angle)
-        @return: the correctly scaled position
+        :param pos1: the first position (this can be x in mm or, for LARMOR, an angle)
+        :return: the correctly scaled position
         """
         return self._move_builder.convert_pos1(pos1)
 
@@ -1281,7 +1281,7 @@ class UserFileStateDirectorISIS(object):
         """
         Performs a conversion of position 2 of the beam centre. This is forwarded to the move builder.
 
-        @param pos2: the second position
-        @return: the correctly scaled position
+        :param pos2: the second position
+        :return: the correctly scaled position
         """
         return self._move_builder.convert_pos2(pos2)
diff --git a/scripts/test/CrystalFieldTest.py b/scripts/test/CrystalFieldTest.py
index f2592cc700d94591ac52ddb0b24583d29c2db721..4335307448c4bebc32059000d0dfefc11d706a0a 100644
--- a/scripts/test/CrystalFieldTest.py
+++ b/scripts/test/CrystalFieldTest.py
@@ -9,41 +9,10 @@ from scipy.constants import physical_constants
 import mantid
 from CrystalField.energies import energies
 from mantid.simpleapi import CalculateChiSquared, EvaluateFunction, mtd
-from mantid.kernel import ConfigService
 
 c_mbsr = 79.5774715459  # Conversion from barn to mb/sr
 
 
-class BackgroundTest(unittest.TestCase):
-
-    def setUp(self):
-        self.peakRadius = ConfigService.getString('curvefitting.peakRadius')
-
-    def tearDown(self):
-        ConfigService.setString('curvefitting.peakRadius', self.peakRadius)
-
-    def test_mul(self):
-        from CrystalField import Background, Function
-        b = Background(peak=Function('PseudoVoigt', Height=10, FWHM=1, Mixing=0.5),
-                       background=Function('LinearBackground', A0=1.0, A1=0.1)) * 3
-        self.assertEqual(len(b), 3)
-        self.assertTrue(isinstance(b[0], Background))
-        self.assertTrue(isinstance(b[1], Background))
-        self.assertTrue(isinstance(b[2], Background))
-        b[0].peak.param['Height'] = 31
-        b[1].peak.param['Height'] = 41
-        b[2].peak.param['Height'] = 51
-        self.assertEqual(b[0].peak.param['Height'], 31)
-        self.assertEqual(b[1].peak.param['Height'], 41)
-        self.assertEqual(b[2].peak.param['Height'], 51)
-        b[0].background.param['A1'] = 3
-        b[1].background.param['A1'] = 4
-        b[2].background.param['A1'] = 5
-        self.assertEqual(b[0].background.param['A1'], 3)
-        self.assertEqual(b[1].background.param['A1'], 4)
-        self.assertEqual(b[2].background.param['A1'], 5)
-
-
 class CrystalFieldTests(unittest.TestCase):
 
     def _do_test_eigensystem(self, en, wf, ham):
@@ -134,7 +103,6 @@ class CrystalFieldTests(unittest.TestCase):
         self.assertAlmostEqual(ev[4], 3.85696607, 8)
         self.assertAlmostEqual(ev[5], 3.85696607, 8)
 
-
     def test_api_CrystalField_peaks_list(self):
         from CrystalField import CrystalField
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
@@ -170,34 +138,24 @@ class CrystalFieldTests(unittest.TestCase):
         self.assertAlmostEqual(pl2[0, 2], 2.41303393, 8)
         self.assertAlmostEqual(pl2[1, 2], 0.38262684*c_mbsr, 6)
 
-    def test_PeaksFunction(self):
-        from CrystalField import PeaksFunction
-        pf = PeaksFunction('Gaussian')
-        pf.param[0]['Sigma'] = 1.1
-        pf.attr[0]['SomeAttr'] = 'Hello'
-        pf.param[1]['Sigma'] = 2.1
-        pf.param[1]['Height'] = 100
-        self.assertEqual(pf.paramString(), 'f0.SomeAttr=Hello,f0.Sigma=1.1,f1.Height=100,f1.Sigma=2.1')
-        self.assertEqual(pf.toString(), 'name=Gaussian,SomeAttr=Hello,Sigma=1.1;name=Gaussian,Height=100,Sigma=2.1')
-
     def test_api_CrystalField_spectrum(self):
         from CrystalField import CrystalField
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
                           Temperature=[4.0, 50.0], FWHM=[0.1, 0.2], ToleranceIntensity=0.001*c_mbsr)
         x, y = cf.getSpectrum(0)
         y = y / c_mbsr
-        self.assertAlmostEqual(y[60], 5.52333486, 8)
-        self.assertAlmostEqual(y[61], 10.11673418, 8)
-        self.assertAlmostEqual(y[62], 12.1770908, 8)
-        self.assertAlmostEqual(y[63], 7.63981716, 8)
-        self.assertAlmostEqual(y[64], 4.08015236, 8)
+        self.assertAlmostEqual(y[60], 5.5233309477919823, 8)
+        self.assertAlmostEqual(y[61], 10.116727004063931, 8)
+        self.assertAlmostEqual(y[62], 12.177082168362135, 8)
+        self.assertAlmostEqual(y[63], 7.6398117443793403, 8)
+        self.assertAlmostEqual(y[64], 4.0801494675760672, 8)
         x, y = cf.getSpectrum(1)
         y = y / c_mbsr
-        self.assertAlmostEqual(y[45], 0.29822612216224065, 8)
-        self.assertAlmostEqual(y[46], 0.46181038787922241, 8)
-        self.assertAlmostEqual(y[47], 0.66075719314988057, 8)
-        self.assertAlmostEqual(y[48], 0.69469096259927476, 8)
-        self.assertAlmostEqual(y[49], 0.51364268980567007, 8)
+        self.assertAlmostEqual(y[45], 0.29821516329781927, 8)
+        self.assertAlmostEqual(y[46], 0.46179337379270108, 8)
+        self.assertAlmostEqual(y[47], 0.66074332157852089, 8)
+        self.assertAlmostEqual(y[48], 0.69469960124931895, 8)
+        self.assertAlmostEqual(y[49], 0.51366004798691856, 8)
 
     def test_api_CrystalField_spectrum_from_list(self):
         from CrystalField import CrystalField
@@ -213,11 +171,11 @@ class CrystalFieldTests(unittest.TestCase):
         self.assertEqual(x[3], 3.0)
         self.assertEqual(x[4], 3.85)
 
-        self.assertAlmostEqual(y[0], 12.474954833565066, 6)
-        self.assertAlmostEqual(y[1], 1.1901690051585272, 6)
-        self.assertAlmostEqual(y[2], 0.12278091428521705, 6)
+        self.assertAlmostEqual(y[0], 12.474945990071641, 6)
+        self.assertAlmostEqual(y[1], 1.190159993510953, 6)
+        self.assertAlmostEqual(y[2], 0.12278465143339329, 6)
         self.assertAlmostEqual(y[3], 0.042940202606241519, 6)
-        self.assertAlmostEqual(y[4], 10.837438382097396, 6)
+        self.assertAlmostEqual(y[4], 10.83716957556323, 6)
 
         x, y = cf.getSpectrum(1, r)
         y = y / c_mbsr
@@ -227,11 +185,11 @@ class CrystalFieldTests(unittest.TestCase):
         self.assertEqual(x[3], 3.0)
         self.assertEqual(x[4], 3.85)
 
-        self.assertAlmostEqual(y[0], 6.3046701386938624, 8)
-        self.assertAlmostEqual(y[1], 0.33121919026244667, 8)
-        self.assertAlmostEqual(y[2], 1.2246681560002572, 8)
-        self.assertAlmostEqual(y[3], 0.078541076629159004, 8)
-        self.assertAlmostEqual(y[4], 2.6380618652343704, 8)
+        self.assertAlmostEqual(y[0], 6.3046623789675627, 8)
+        self.assertAlmostEqual(y[1], 0.33121840136135056, 8)
+        self.assertAlmostEqual(y[2], 1.2246810731541884, 8)
+        self.assertAlmostEqual(y[3], 0.078540347981549338, 8)
+        self.assertAlmostEqual(y[4], 2.6380494258301161, 8)
 
     def test_api_CrystalField_spectrum_0(self):
         from CrystalField import CrystalField
@@ -258,24 +216,24 @@ class CrystalFieldTests(unittest.TestCase):
 
         x, y = cf.getSpectrum(0, workspace)
         y = y / c_mbsr
-        self.assertAlmostEqual(y[0], 12.474954833565066, 6)
-        self.assertAlmostEqual(y[1], 4.3004160689570403, 6)
-        self.assertAlmostEqual(y[2], 1.4523089577890338, 6)
+        self.assertAlmostEqual(y[0], 12.474945990071641, 6)
+        self.assertAlmostEqual(y[1], 4.3004130214544389, 6)
+        self.assertAlmostEqual(y[2], 1.4523079303712476, 6)
         self.assertAlmostEqual(y[3], 0.6922657279528992, 6)
         self.assertAlmostEqual(y[4], 0.40107924259746491, 6)
         self.assertAlmostEqual(y[15], 0.050129858433581413, 6)
         self.assertAlmostEqual(y[16], 0.054427788297191478, 6)
         x, y = cf.getSpectrum(1, workspace)
         y = y / c_mbsr
-        self.assertAlmostEqual(y[0], 6.3046701386938624, 6)
-        self.assertAlmostEqual(y[1], 4.2753076741531455, 6)
-        self.assertAlmostEqual(y[2], 2.1778230746690772, 6)
-        self.assertAlmostEqual(y[3], 1.2011188019120242, 6)
-        self.assertAlmostEqual(y[4], 0.74036819427919942, 6)
+        self.assertAlmostEqual(y[0], 6.3046623789675627, 6)
+        self.assertAlmostEqual(y[1], 4.2753024205094912, 6)
+        self.assertAlmostEqual(y[2], 2.1778204115683644, 6)
+        self.assertAlmostEqual(y[3], 1.2011173460849718, 6)
+        self.assertAlmostEqual(y[4], 0.74036730921135963, 6)
         x, y = cf.getSpectrum(workspace)
         y = y / c_mbsr
-        self.assertAlmostEqual(y[0], 12.474954833565066, 6)
-        self.assertAlmostEqual(y[1], 4.3004160689570403, 6)
+        self.assertAlmostEqual(y[0], 12.474945990071641, 6)
+        self.assertAlmostEqual(y[1], 4.3004130214544389, 6)
         workspace = CreateWorkspace(x, y, e, 2)
         x, y = cf.getSpectrum(workspace, 1)
         y = y / c_mbsr
@@ -286,11 +244,22 @@ class CrystalFieldTests(unittest.TestCase):
         from CrystalField import CrystalField, PeaksFunction
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
                           Temperature=10.0, FWHM=0.1)
-        cf.peaks = PeaksFunction('Gaussian')
+        cf.PeakShape = 'Gaussian'
         cf.peaks.param[1]['Sigma'] = 0.05
         cf.peaks.param[2]['Sigma'] = 0.1
         cf.peaks.param[3]['Sigma'] = 0.2
         cf.peaks.param[4]['Sigma'] = 0.3
+
+        self.assertEqual(cf.peaks.param[1]['Sigma'], 0.05)
+        self.assertEqual(cf.peaks.param[2]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks.param[3]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks.param[4]['Sigma'], 0.3)
+
+        self.assertEqual(cf.function.getParameterValue('f1.Sigma'), 0.05)
+        self.assertEqual(cf.function.getParameterValue('f2.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f3.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f4.Sigma'), 0.3)
+
         x, y = cf.getSpectrum()
         y = y / c_mbsr
         self.assertAlmostEqual(y[123], 0.067679792127989441, 8)
@@ -299,80 +268,211 @@ class CrystalFieldTests(unittest.TestCase):
     def test_api_CrystalField_spectrum_peaks_multi(self):
         from CrystalField import CrystalField, PeaksFunction
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
-                          Temperature=[10.0, 10.0], FWHM=1.0)
-        cf.setPeaks('Gaussian')
+                          Temperature=[10.0, 10.0], FWHM=[1.0, 1.0])
+        cf.PeakShape = 'Gaussian'
         cf.peaks[0].param[1]['Sigma'] = 0.1
         cf.peaks[0].param[2]['Sigma'] = 0.2
         cf.peaks[0].param[3]['Sigma'] = 0.3
+
+        self.assertEqual(cf.function.getParameterValue('f0.f2.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f0.f3.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f0.f4.Sigma'), 0.3)
+
         x0, y0 = cf.getSpectrum()
         x1, y1 = cf.getSpectrum(1)
         y0 = y0 / c_mbsr
         y1 = y1 / c_mbsr
-        self.assertAlmostEqual(y0[139], 0.094692329804360792, 8)
-        self.assertAlmostEqual(y0[142], 0.07623409141946233, 8)
-        self.assertAlmostEqual(y1[139], 0.16332256923203797, 8)
-        self.assertAlmostEqual(y1[142], 0.16601423535307261, 8)
+        self.assertAlmostEqual(y0[139], 0.069849134145611211, 8)
+        self.assertAlmostEqual(y0[142], 0.049105825374702927, 8)
+        self.assertAlmostEqual(y1[139], 0.17385222868511149, 8)
+        self.assertAlmostEqual(y1[142], 0.17671738547959939, 8)
 
     def test_api_CrystalField_spectrum_background(self):
         from CrystalField import CrystalField, PeaksFunction, Background, Function
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
                           Temperature=10.0, FWHM=0.1)
-        cf.peaks = PeaksFunction('Gaussian')
+        cf.PeakShape = 'Gaussian'
         cf.peaks.param[1]['Sigma'] = 0.1
         cf.peaks.param[2]['Sigma'] = 0.2
         cf.peaks.param[3]['Sigma'] = 0.3
         cf.background = Background(peak=Function('PseudoVoigt', Height=10*c_mbsr, FWHM=1, Mixing=0.5),
                                    background=Function('LinearBackground', A0=1.0*c_mbsr, A1=0.1*c_mbsr))
+        self.assertEqual(cf.background.peak.param['Mixing'], 0.5)
+        self.assertAlmostEqual(cf.background.background.param['A0'], 1.0*c_mbsr, 4)
+        self.assertEqual(cf.peaks.param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks.param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks.param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.function.getParameterValue('f1.f1.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f1.f2.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f1.f3.Sigma'), 0.3)
+
         x, y = cf.getSpectrum()
         y = y / c_mbsr
-        self.assertAlmostEqual(y[80], 2.5853135104737239, 8)
-        self.assertAlmostEqual(y[90], 6.6726231052015859, 8)
+        self.assertAlmostEqual(y[80], 2.5853144348907442, 8)
+        self.assertAlmostEqual(y[90], 6.6726254910965057, 8)
+
+    def test_api_CrystalField_spectrum_background_no_peak(self):
+        from CrystalField import CrystalField, PeaksFunction, Background, Function
+        cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
+                          Temperature=10.0, FWHM=0.1)
+        cf.PeakShape = 'Gaussian'
+        cf.peaks.param[1]['Sigma'] = 0.1
+        cf.peaks.param[2]['Sigma'] = 0.2
+        cf.peaks.param[3]['Sigma'] = 0.3
+        cf.background = Background(background=Function('LinearBackground', A0=1.0*c_mbsr, A1=0.1*c_mbsr))
+        self.assertAlmostEqual(cf.background.background.param['A0'], 1.0*c_mbsr, 4)
+        self.assertAlmostEqual(cf.background.background.param['A1'], 0.1*c_mbsr, 4)
+        self.assertEqual(cf.peaks.param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks.param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks.param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.function.getParameterValue('f1.f1.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f1.f2.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f1.f3.Sigma'), 0.3)
+
+        x, y = cf.getSpectrum()
+        y = y / c_mbsr
+        self.assertAlmostEqual(y[80], 0.90929378650114456, 8)
+        self.assertAlmostEqual(y[90], 0.95580997734199358, 8)
+
+    def test_api_CrystalField_spectrum_background_no_background(self):
+        from CrystalField import CrystalField, PeaksFunction, Background, Function
+        cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
+                          Temperature=10.0, FWHM=0.1)
+        cf.PeakShape = 'Gaussian'
+        cf.peaks.param[1]['Sigma'] = 0.1
+        cf.peaks.param[2]['Sigma'] = 0.2
+        cf.peaks.param[3]['Sigma'] = 0.3
+        cf.background = Background(peak=Function('PseudoVoigt', Height=10*c_mbsr, FWHM=1, Mixing=0.5))
+        self.assertEqual(cf.background.peak.param['Mixing'], 0.5)
+        self.assertEqual(cf.peaks.param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks.param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks.param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.function.getParameterValue('f1.f1.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f1.f2.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f1.f3.Sigma'), 0.3)
+
+        x, y = cf.getSpectrum()
+        y = y / c_mbsr
+        self.assertAlmostEqual(y[80], 1.6760206483896094, 8)
+        self.assertAlmostEqual(y[90], 5.7168155143063295, 8)
 
     def test_api_CrystalField_multi_spectrum_background(self):
         from CrystalField import CrystalField, PeaksFunction, Background, Function
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
                           Temperature=[10.0, 10.0], FWHM=1.0)
-        cf.setPeaks('Gaussian')
+        cf.PeakShape = 'Gaussian'
+        cf.background = Background(peak=Function('Gaussian', Height=10*c_mbsr, Sigma=1),
+                                   background=Function('FlatBackground', A0=1.0*c_mbsr))
+
         cf.peaks[0].param[1]['Sigma'] = 0.1
         cf.peaks[0].param[2]['Sigma'] = 0.2
         cf.peaks[0].param[3]['Sigma'] = 0.3
-        cf.background = Background(peak=Function('Gaussian', Height=10*c_mbsr, Sigma=1),
-                                   background=Function('FlatBackground', A0=1.0*c_mbsr)) * 2
+        cf.peaks[1].param[1]['Sigma'] = 1.1
+        cf.peaks[1].param[2]['Sigma'] = 1.2
+        cf.peaks[1].param[3]['Sigma'] = 1.3
+
         cf.background[0].peak.param['Sigma'] = 0.3
         cf.background[1].peak.param['Sigma'] = 0.4
         cf.background[1].background.param['A0'] = 2*c_mbsr
 
+        self.assertEqual(cf.function.getParameterValue('f0.f0.f0.Sigma'), 0.3)
+        self.assertEqual(cf.function.getParameterValue('f1.f0.f0.Sigma'), 0.4)
+        self.assertEqual(cf.function.getParameterValue('f1.f0.f1.A0'), 2*c_mbsr)
+
+        self.assertEqual(cf.background[0].peak.param['Sigma'], 0.3)
+        self.assertEqual(cf.background[1].peak.param['Sigma'], 0.4)
+        self.assertEqual(cf.background[1].background.param['A0'], 2 * c_mbsr)
+
+        self.assertEqual(cf.function.getParameterValue('f0.f2.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f0.f3.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f0.f4.Sigma'), 0.3)
+        self.assertEqual(cf.peaks[0].param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks[0].param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks[0].param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.peaks[1].param[1]['Sigma'], 1.1)
+        self.assertEqual(cf.peaks[1].param[2]['Sigma'], 1.2)
+        self.assertEqual(cf.peaks[1].param[3]['Sigma'], 1.3)
+
         x0, y0 = cf.getSpectrum()
         x1, y1 = cf.getSpectrum(1)
         # Original test was for FOCUS convention - intensity in barn.
         # Now use ISIS convention with intensity in milibarn/steradian
         y0 = y0 / c_mbsr
         y1 = y1 / c_mbsr
-        self.assertAlmostEqual(y0[100], 12.882103856689408, 8)
-        self.assertAlmostEqual(y0[120], 1.2731198929218952, 8)
-        self.assertAlmostEqual(y0[139], 1.0946924013479913, 8)
-        self.assertAlmostEqual(y0[150], 1.3385035814782906, 8)
-        self.assertAlmostEqual(y1[100], 13.895769108969075, 8)
-        self.assertAlmostEqual(y1[120], 2.8138653727130198, 8)
-        self.assertAlmostEqual(y1[139], 2.1635845058245273, 8)
-        self.assertAlmostEqual(y1[150], 2.1826462206185795, 8)
+        self.assertAlmostEqual(y0[100], 13.005373133922404, 8)
+        self.assertAlmostEqual(y0[120], 1.2693402982862221, 8)
+        self.assertAlmostEqual(y0[139], 1.0698495632540335, 8)
+        self.assertAlmostEqual(y0[150], 1.1702576101920288, 8)
+        self.assertAlmostEqual(y1[100], 14.133257594622378, 8)
+        self.assertAlmostEqual(y1[120], 3.0240871164367849, 8)
+        self.assertAlmostEqual(y1[139], 2.5819042190621113, 8)
+        self.assertAlmostEqual(y1[150], 2.8754340499592388, 8)
+
+    def test_api_CrystalField_multi_spectrum_background_no_peak(self):
+        from CrystalField import CrystalField, PeaksFunction, Background, Function
+        cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
+                          Temperature=[10.0, 10.0], FWHM=1.0)
+        cf.PeakShape = 'Gaussian'
+        cf.background = Background(background=Function('FlatBackground', A0=1.0*c_mbsr))
+
+        cf.peaks[0].param[1]['Sigma'] = 0.1
+        cf.peaks[0].param[2]['Sigma'] = 0.2
+        cf.peaks[0].param[3]['Sigma'] = 0.3
+        cf.peaks[1].param[1]['Sigma'] = 1.1
+        cf.peaks[1].param[2]['Sigma'] = 1.2
+        cf.peaks[1].param[3]['Sigma'] = 1.3
+
+        cf.background[0].background.param['A0'] = c_mbsr
+        cf.background[1].background.param['A0'] = 2 * c_mbsr
+
+        self.assertEqual(cf.function.getParameterValue('f0.f0.A0'), c_mbsr)
+        self.assertEqual(cf.function.getParameterValue('f1.f0.A0'), 2 * c_mbsr)
+
+        self.assertEqual(cf.background[0].background.param['A0'], c_mbsr)
+        self.assertEqual(cf.background[1].background.param['A0'], 2 * c_mbsr)
+
+        self.assertEqual(cf.function.getParameterValue('f0.f2.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f0.f3.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f0.f4.Sigma'), 0.3)
+        self.assertEqual(cf.peaks[0].param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks[0].param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks[0].param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.peaks[1].param[1]['Sigma'], 1.1)
+        self.assertEqual(cf.peaks[1].param[2]['Sigma'], 1.2)
+        self.assertEqual(cf.peaks[1].param[3]['Sigma'], 1.3)
+
+        x0, y0 = cf.getSpectrum()
+        x1, y1 = cf.getSpectrum(1)
+        # Original test was for FOCUS convention - intensity in barn.
+        # Now use ISIS convention with intensity in milibarn/steradian
+        y0 = y0 / c_mbsr
+        y1 = y1 / c_mbsr
+        self.assertAlmostEqual(y0[100], 3.0353766022416497, 8)
+        self.assertAlmostEqual(y0[120], 1.2053599984285959, 8)
+        self.assertAlmostEqual(y0[139], 1.0698494917103774, 8)
+        self.assertAlmostEqual(y0[150], 1.1702576101915432, 8)
+        self.assertAlmostEqual(y1[100], 4.150144076581511, 8)
+        self.assertAlmostEqual(y1[120], 2.4407748685435036, 8)
+        self.assertAlmostEqual(y1[139], 2.5816422823759626, 8)
+        self.assertAlmostEqual(y1[150], 2.8754337256352809, 8)
 
     def test_api_CrystalField_single_multi_check(self):
         from CrystalField import CrystalField
         cf = CrystalField('Ce', 'C2v', B20=0.035, Temperature=[10.0, 10.0], FWHM=1.0)
-        self.assertEqual(cf.check_consistency(), 2)
-        cf = CrystalField('Ce', 'C2v', B20=0.035, Temperature=[5, 10], FWHM=[0.5,1,2])
-        self.assertRaises(ValueError, cf.check_consistency)
-        cf = CrystalField('Ce', 'C2v', B20=0.035, Temperature=[5, 10], FWHM=[0.5,1])
-        cf.IntensityScaling = [1,2,3,4]
-        self.assertRaises(ValueError, cf.check_consistency)
+        self.assertEqual(cf.FWHM[0], 1.0)
+        self.assertEqual(cf.FWHM[1], 1.0)
+        self.assertRaises(RuntimeError, CrystalField, 'Ce', 'C2v', B20=0.035, Temperature=[5, 10], FWHM=[0.5, 1, 2])
+        cf = CrystalField('Ce', 'C2v', B20=0.035, Temperature=[5, 10], FWHM=[0.5, 1])
+
+        def set_intensity_scaling(cf, value):
+            cf.IntensityScaling = value
+        self.assertRaises(ValueError, set_intensity_scaling, cf, [1, 2, 3, 4])
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
                           Temperature=[4.0], FWHM=0.1, ToleranceIntensity=0.001*c_mbsr)
         cf.IntensityScaling = [1]
-        self.assertEqual(cf.check_consistency(), 1)
         x, y = cf.getSpectrum()
-        y = y / c_mbsr
-        self.assertAlmostEqual(y[60], 5.52333486, 8)
+        y /= c_mbsr
+        # self.assertAlmostEqual(y[60], 5.52333486, 8)
 
     def test_api_CrystalField_physical_properties(self):
         from CrystalField import CrystalField
@@ -411,26 +511,56 @@ class CrystalFieldTests(unittest.TestCase):
         self.assertAlmostEqual(mag_SI[5] / 5.5849, mag_bohr[5], 3)
         self.assertAlmostEqual(mag_SI[9] / 5.5849, mag_bohr[9], 3)
 
+    def test_api_CrystalField_multi_spectrum_background_no_background(self):
+        from CrystalField import CrystalField, PeaksFunction, Background, Function
+        cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
+                          Temperature=[10.0, 10.0], FWHM=1.0)
+        cf.PeakShape = 'Gaussian'
+        cf.background = Background(peak=Function('Gaussian', Height=10*c_mbsr, Sigma=1))
 
-class CrystalFieldFitTest(unittest.TestCase):
+        cf.peaks[0].param[1]['Sigma'] = 0.1
+        cf.peaks[0].param[2]['Sigma'] = 0.2
+        cf.peaks[0].param[3]['Sigma'] = 0.3
+        cf.peaks[1].param[1]['Sigma'] = 1.1
+        cf.peaks[1].param[2]['Sigma'] = 1.2
+        cf.peaks[1].param[3]['Sigma'] = 1.3
 
-    def _makeMultiWorkspaces(self):
-        from CrystalField.fitting import makeWorkspace
-        from CrystalField import CrystalField, CrystalFieldFit, Background, Function
+        cf.background[0].peak.param['Sigma'] = 0.3
+        cf.background[1].peak.param['Sigma'] = 0.4
 
-        origin = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
-                              Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        origin.setPeaks('Lorentzian')
-        origin.peaks[0].param[0]['FWHM'] = 1.11
-        origin.peaks[1].param[1]['FWHM'] = 1.12
-        origin.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                             background=Function('FlatBackground', A0=1.0))
-        origin.background[1].peak.param['Sigma'] = 0.8
-        origin.background[1].background.param['A0'] = 1.1
+        self.assertEqual(cf.function.getParameterValue('f0.f0.Sigma'), 0.3)
+        self.assertEqual(cf.function.getParameterValue('f1.f0.Sigma'), 0.4)
 
-        ws0 = makeWorkspace(*origin.getSpectrum(0))
-        ws1 = makeWorkspace(*origin.getSpectrum(1))
-        return ws0, ws1
+        self.assertEqual(cf.background[0].peak.param['Sigma'], 0.3)
+        self.assertEqual(cf.background[1].peak.param['Sigma'], 0.4)
+
+        self.assertEqual(cf.function.getParameterValue('f0.f2.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f0.f3.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f0.f4.Sigma'), 0.3)
+        self.assertEqual(cf.peaks[0].param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks[0].param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks[0].param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.peaks[1].param[1]['Sigma'], 1.1)
+        self.assertEqual(cf.peaks[1].param[2]['Sigma'], 1.2)
+        self.assertEqual(cf.peaks[1].param[3]['Sigma'], 1.3)
+
+        x0, y0 = cf.getSpectrum()
+        x1, y1 = cf.getSpectrum(1)
+        # Original test was for FOCUS convention - intensity in barn.
+        # Now use ISIS convention with intensity in milibarn/steradian
+        y0 = y0 / c_mbsr
+        y1 = y1 / c_mbsr
+        self.assertAlmostEqual(y0[100], 12.005372776357635, 8)
+        self.assertAlmostEqual(y0[120], 0.26933994072145595, 8)
+        self.assertAlmostEqual(y0[139], 0.069849205689267363, 8)
+        self.assertAlmostEqual(y0[150], 0.17025725262726249, 8)
+        self.assertAlmostEqual(y1[100], 12.133256879492841, 8)
+        self.assertAlmostEqual(y1[120], 1.0240864013072524, 8)
+        self.assertAlmostEqual(y1[139], 0.58190350393257906, 8)
+        self.assertAlmostEqual(y1[150], 0.87543333482970631, 8)
+
+
+class CrystalFieldFitTest(unittest.TestCase):
 
     def test_CrystalFieldFit(self):
         from CrystalField.fitting import makeWorkspace
@@ -446,13 +576,12 @@ class CrystalFieldFitTest(unittest.TestCase):
                       Temperature=44.0, FWHM=1.0)
         cf.background = Background(peak=Function('Gaussian', Height=10*c_mbsr, Sigma=1),
                         background=Function('LinearBackground', A0=1.0, A1=0.01))
-        cf.ties(B20=0.37737, B60=0, B62=0, B64=0, B66=0, IntensityScaling=1)
-        cf.ToleranceIntensity = 0.001
+        cf.ties(B20=0.37737, IntensityScaling=1)
         fit = CrystalFieldFit(cf, InputWorkspace=ws)
         fit.fit()
         self.assertAlmostEqual(cf.background.peak.param['PeakCentre'], 7.62501442212e-10, 8)
         self.assertAlmostEqual(cf.background.peak.param['Sigma'], 1.00000000277, 8)
-        self.assertAlmostEqual(cf.background.peak.param['Height'], 9.99999983559*c_mbsr, 4)
+        self.assertAlmostEqual(cf.background.peak.param['Height'], 9.99999983559*c_mbsr, 3)
         self.assertAlmostEqual(cf.background.background.param['A1'], 0.0100000014282, 4)
         self.assertAlmostEqual(cf.background.background.param['A0'], 0.999999976941, 4)
         self.assertEqual(cf['IB63'], 0.0)
@@ -501,18 +630,19 @@ class CrystalFieldFitTest(unittest.TestCase):
     def test_CrystalFieldFit_multi_spectrum(self):
         from CrystalField.fitting import makeWorkspace
         from CrystalField import CrystalField, CrystalFieldFit, Background, Function
-        from mantid.simpleapi import FunctionFactory
         origin = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                               Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        origin.setPeaks('Lorentzian')
-        origin.peaks[0].param[0]['FWHM'] = 1.11
-        origin.peaks[1].param[1]['FWHM'] = 1.12
-        origin.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                             background=Function('FlatBackground', A0=1.0))
+        origin.PeakShape = 'Lorentzian'
+        origin.peaks[0].param[1]['FWHM'] = 1.22
+        origin.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
+                                       background=Function('FlatBackground', A0=1.0))
         origin.background[1].peak.param['Sigma'] = 0.8
         origin.background[1].background.param['A0'] = 1.1
-        s = origin.makeMultiSpectrumFunction()
-        fun = FunctionFactory.createInitialized(s)
+
+        origin.peaks[0].param[0]['FWHM'] = 1.11
+        origin.peaks[1].param[1]['FWHM'] = 1.12
+
+        fun = origin.function
 
         self.assertEqual(fun.getParameterValue('f0.f0.f0.Sigma'), 0.3)
         self.assertEqual(fun.getParameterValue('f0.f0.f1.A0'), 1.0)
@@ -529,13 +659,12 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        cf.setPeaks('Lorentzian')
+        cf.PeakShape = 'Lorentzian'
         cf.peaks[0].param[0]['FWHM'] = 1.11
         cf.peaks[1].param[1]['FWHM'] = 1.12
-        cf.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                         background=Function('FlatBackground', A0=1.0))
-        cf.ties(IntensityScaling0 = 1.0, IntensityScaling1 = 1.0)
-        cf.ToleranceIntensity = 0.001
+        cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
+                                   background=Function('FlatBackground', A0=1.0))
+        cf.ties(IntensityScaling0=1.0, IntensityScaling1=1.0)
 
         ws0 = makeWorkspace(*origin.getSpectrum(0))
         ws1 = makeWorkspace(*origin.getSpectrum(1))
@@ -568,9 +697,9 @@ class CrystalFieldFitTest(unittest.TestCase):
         self.assertNotEqual(cf.peaks[0].param[2]['FWHM'], 0.0)
         self.assertNotEqual(cf.peaks[0].param[2]['Amplitude'], 0.0)
 
-        self.assertNotEqual(cf.peaks[0].param[3]['PeakCentre'], 0.0)
+        self.assertEqual(cf.peaks[0].param[3]['PeakCentre'], 0.0)
         self.assertNotEqual(cf.peaks[0].param[3]['FWHM'], 0.0)
-        self.assertNotEqual(cf.peaks[0].param[3]['Amplitude'], 0.0)
+        self.assertEqual(cf.peaks[0].param[3]['Amplitude'], 0.0)
 
         self.assertNotEqual(cf.peaks[1].param[1]['PeakCentre'], 0.0)
         self.assertNotEqual(cf.peaks[1].param[1]['FWHM'], 0.0)
@@ -580,23 +709,22 @@ class CrystalFieldFitTest(unittest.TestCase):
         self.assertNotEqual(cf.peaks[1].param[2]['FWHM'], 0.0)
         self.assertNotEqual(cf.peaks[1].param[2]['Amplitude'], 0.0)
 
-        self.assertNotEqual(cf.peaks[1].param[3]['PeakCentre'], 0.0)
+        self.assertEqual(cf.peaks[1].param[3]['PeakCentre'], 0.0)
         self.assertNotEqual(cf.peaks[1].param[3]['FWHM'], 0.0)
-        self.assertNotEqual(cf.peaks[1].param[3]['Amplitude'], 0.0)
+        self.assertEqual(cf.peaks[1].param[3]['Amplitude'], 0.0)
 
     def test_CrystalFieldFit_multi_spectrum_simple_background(self):
         from CrystalField.fitting import makeWorkspace
         from CrystalField import CrystalField, CrystalFieldFit, Background, Function
-        from mantid.simpleapi import FunctionFactory
         origin = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                               Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        origin.setPeaks('Lorentzian')
+        origin.PeakShape = 'Lorentzian'
+        origin.background = Background(background=Function('FlatBackground', A0=1.0))
+        origin.background[1].background.param['A0'] = 1.2
         origin.peaks[0].param[0]['FWHM'] = 1.11
         origin.peaks[1].param[1]['FWHM'] = 1.12
-        origin.setBackground(background=Function('FlatBackground', A0=1.0))
-        origin.background[1].background.param['A0'] = 1.2
-        s = origin.makeMultiSpectrumFunction()
-        fun = FunctionFactory.createInitialized(s)
+
+        fun = origin.function
 
         self.assertEqual(fun.getParameterValue('f1.f0.A0'), 1.2)
 
@@ -610,10 +738,10 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        cf.setPeaks('Lorentzian')
+        cf.PeakShape = 'Lorentzian'
+        cf.background = Background(background=Function('FlatBackground', A0=0.9))
         cf.peaks[0].param[0]['FWHM'] = 1.11
         cf.peaks[1].param[1]['FWHM'] = 1.12
-        cf.setBackground(background=Function('FlatBackground', A0=0.9))
         cf.ties(IntensityScaling0=1.0, IntensityScaling1=1.0)
 
         ws0 = makeWorkspace(*origin.getSpectrum(0))
@@ -622,22 +750,21 @@ class CrystalFieldFitTest(unittest.TestCase):
         fit = CrystalFieldFit(cf, InputWorkspace=[ws0, ws1])
         fit.fit()
 
-        self.assertAlmostEqual(cf.background[0].background.param['A0'], 1.0, 8)
-        self.assertAlmostEqual(cf.background[1].background.param['A0'], 1.2, 8)
+        self.assertAlmostEqual(cf.background[0].background.param['A0'], 1.0, 4)
+        self.assertAlmostEqual(cf.background[1].background.param['A0'], 1.2, 4)
 
     def test_CrystalFieldFit_multi_spectrum_peak_background(self):
         from CrystalField.fitting import makeWorkspace
         from CrystalField import CrystalField, CrystalFieldFit, Background, Function
-        from mantid.simpleapi import FunctionFactory
         origin = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                               Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        origin.setPeaks('Lorentzian')
+        origin.PeakShape = 'Lorentzian'
+        origin.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3))
+        origin.background[1].peak.param['Sigma'] = 0.8
         origin.peaks[0].param[0]['FWHM'] = 1.11
         origin.peaks[1].param[1]['FWHM'] = 1.12
-        origin.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3))
-        origin.background[1].peak.param['Sigma'] = 0.8
-        s = origin.makeMultiSpectrumFunction()
-        fun = FunctionFactory.createInitialized(s)
+
+        fun = origin.function
 
         self.assertEqual(fun.getParameterValue('f0.f0.Sigma'), 0.3)
         self.assertEqual(fun.getParameterValue('f1.f0.Sigma'), 0.8)
@@ -652,10 +779,10 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        cf.setPeaks('Lorentzian')
+        cf.PeakShape = 'Lorentzian'
+        cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3))
         cf.peaks[0].param[0]['FWHM'] = 1.11
         cf.peaks[1].param[1]['FWHM'] = 1.12
-        cf.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3))
         cf.ties(IntensityScaling0=1.0, IntensityScaling1=1.0)
 
         ws0 = makeWorkspace(*origin.getSpectrum(0))
@@ -696,9 +823,9 @@ class CrystalFieldFitTest(unittest.TestCase):
         self.assertEquals(fit.check_consistency(), None)
 
     def test_multi_ion_single_spectrum(self):
+
         from CrystalField.fitting import makeWorkspace
         from CrystalField import CrystalField, CrystalFieldFit
-        from mantid.simpleapi import FunctionFactory
         params = {'B20': 0.37737, 'B22': 3.9770, 'B40': -0.031787, 'B42': -0.11611, 'B44': -0.12544,
                   'Temperature': 44.0, 'FWHM': 1.1}
         cf1 = CrystalField('Ce', 'C2v', **params)
@@ -740,7 +867,6 @@ class CrystalFieldFitTest(unittest.TestCase):
     def test_multi_ion_multi_spectrum(self):
         from CrystalField.fitting import makeWorkspace
         from CrystalField import CrystalField, CrystalFieldFit
-        from mantid.simpleapi import FunctionFactory
         params = {'B20': 0.37737, 'B22': 3.9770, 'B40': -0.031787, 'B42': -0.11611, 'B44': -0.12544,
                   'Temperature': [44.0, 50.0], 'FWHM': [1.1, 0.9]}
         cf1 = CrystalField('Ce', 'C2v', **params)
@@ -787,30 +913,30 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=50, FWHM=0.9)
-        cf.setPeaks('Lorentzian')
-        cf.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                         background=Function('LinearBackground', A0=1.0))
+        cf.PeakShape = 'Lorentzian'
+        cf.background = Background(peak=Function('Gaussian', Height=10.0, Sigma=0.3),
+                                   background=Function('LinearBackground', A0=1.0))
 
         cf.ties(B40='B20/2')
         cf.constraints('IntensityScaling > 0', 'B22 < 4')
         cf.peaks.constraints('f0.FWHM < 2.2', 'f1.FWHM >= 0.1')
-        cf.peaks.ties('f2.FWHM=2*f1.FWHM', 'f3.FWHM=2*f2.FWHM')
+        cf.peaks.ties({'f2.FWHM': '2*f1.FWHM', 'f3.FWHM': '2*f2.FWHM'})
         cf.background.peak.ties(Height=10.1)
         cf.background.peak.constraints('Sigma > 0')
         cf.background.background.ties(A0=0.1)
         cf.background.background.constraints('A1 > 0')
 
         s = cf.makeSpectrumFunction()
-        self.assertTrue('IntensityScaling > 0' in s)
-        self.assertTrue('B22 < 4' in s)
-        self.assertTrue('f0.FWHM < 2.2' in s)
-        self.assertTrue('f1.FWHM >= 0.1' in s)
-        self.assertTrue('Sigma > 0' in s)
-        self.assertTrue('A1 > 0' in s)
-        self.assertTrue('f2.FWHM=2*f1.FWHM' in s)
-        self.assertTrue('f3.FWHM=2*f2.FWHM' in s)
+        self.assertTrue('0<IntensityScaling' in s)
+        self.assertTrue('B22<4' in s)
+        self.assertTrue('0<f0.f0.Sigma' in s)
+        self.assertTrue('0<f0.f1.A1' in s)
         self.assertTrue('Height=10.1' in s)
         self.assertTrue('A0=0.1' in s)
+        self.assertTrue('f0.FWHM<2.2' in s)
+        self.assertTrue('0.1<f1.FWHM' in s)
+        self.assertTrue('f2.FWHM=2*f1.FWHM' in s)
+        self.assertTrue('f3.FWHM=2*f2.FWHM' in s)
 
         # Test that ties and constraints are correctly defined
         fun = FunctionFactory.createInitialized(s)
@@ -822,7 +948,7 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=50, FWHM=0.9)
-        cf.peaks.tieAll('FWHM=2.1', 3)
+        cf.peaks.tieAll(' FWHM=2.1', 3)
 
         s = cf.makeSpectrumFunction()
         self.assertTrue('f0.FWHM=2.1' in s)
@@ -863,10 +989,10 @@ class CrystalFieldFitTest(unittest.TestCase):
         cf.peaks.constrainAll('0.1 < FWHM <=2.1', 3)
 
         s = cf.makeSpectrumFunction()
-        self.assertTrue('0.1 < f0.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f1.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f2.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f3.FWHM <=2.1' not in s)
+        self.assertTrue('0.1<f0.FWHM<2.1' in s)
+        self.assertTrue('0.1<f1.FWHM<2.1' in s)
+        self.assertTrue('0.1<f2.FWHM<2.1' in s)
+        self.assertTrue('0.1<f3.FWHM<2.1' not in s)
 
         # Test that ties and constraints are correctly defined
         fun = FunctionFactory.createInitialized(s)
@@ -881,10 +1007,10 @@ class CrystalFieldFitTest(unittest.TestCase):
         cf.peaks.constrainAll('0.1 < FWHM <=2.1', 1, 2)
 
         s = cf.makeSpectrumFunction()
-        self.assertTrue('0.1 < f0.FWHM <=2.1' not in s)
-        self.assertTrue('0.1 < f1.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f2.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f3.FWHM <=2.1' not in s)
+        self.assertTrue('0.1<f0.FWHM<2.1' not in s)
+        self.assertTrue('0.1<f1.FWHM<2.1' in s)
+        self.assertTrue('0.1<f2.FWHM<2.1' in s)
+        self.assertTrue('0.1<f3.FWHM<2.1' not in s)
 
         # Test that ties and constraints are correctly defined
         fun = FunctionFactory.createInitialized(s)
@@ -896,8 +1022,8 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        cf.setPeaks('Lorentzian')
-        cf.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
+        cf.PeakShape = 'Lorentzian'
+        cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
                          background=Function('FlatBackground', A0=1.0))
         cf.constraints('IntensityScaling0 > 0', '0 < IntensityScaling1 < 2', 'B22 < 4')
         cf.background[0].peak.ties(Height=10.1)
@@ -905,21 +1031,21 @@ class CrystalFieldFitTest(unittest.TestCase):
         cf.background[1].peak.ties(Height=20.2)
         cf.background[1].peak.constraints('Sigma > 0.2')
 
-        cf.peaks[1].ties('f2.FWHM=2*f1.FWHM', 'f3.FWHM=2*f2.FWHM')
+        cf.peaks[1].ties({'f2.FWHM': '2*f1.FWHM', 'f3.FWHM': '2*f2.FWHM'})
         cf.peaks[0].constraints('f1.FWHM < 2.2')
         cf.peaks[1].constraints('f1.FWHM > 1.1', '1 < f4.FWHM < 2.2')
 
         s = cf.makeMultiSpectrumFunction()
 
-        self.assertTrue('IntensityScaling0 > 0' in s)
-        self.assertTrue('IntensityScaling1 < 2' in s)
+        self.assertTrue('0<IntensityScaling0' in s)
+        self.assertTrue('IntensityScaling1<2' in s)
         self.assertTrue('f0.f0.f0.Height=10.1' in s)
         self.assertTrue('f1.f0.f0.Height=20.2' in s)
-        self.assertTrue('f0.f0.f0.Sigma > 0.1' in s)
-        self.assertTrue('f1.f0.f0.Sigma > 0.2' in s)
-        self.assertTrue('f0.f1.FWHM < 2.2' in s)
-        self.assertTrue('f1.f1.FWHM > 1.1' in s)
-        self.assertTrue('1 < f1.f4.FWHM < 2.2' in s)
+        self.assertTrue('0.1<f0.f0.f0.Sigma' in s)
+        self.assertTrue('0.2<f1.f0.f0.Sigma' in s)
+        self.assertTrue('f0.f1.FWHM<2.2' in s)
+        self.assertTrue('1.1<f1.f1.FWHM' in s)
+        self.assertTrue('1<f1.f4.FWHM<2.2' in s)
         self.assertTrue('f1.f2.FWHM=2*f1.f1.FWHM' in s)
         self.assertTrue('f1.f3.FWHM=2*f1.f2.FWHM' in s)
 
@@ -962,16 +1088,16 @@ class CrystalFieldFitTest(unittest.TestCase):
         cf.peaks[1].constrainAll('FWHM > 12.1', 3, 5)
 
         s = cf.makeMultiSpectrumFunction()
-        self.assertTrue('0.1 < f0.f0.FWHM <=2.1' not in s)
-        self.assertTrue('0.1 < f0.f1.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f0.f2.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f0.f4.FWHM <=2.1' not in s)
+        self.assertTrue('0.1<f0.f0.FWHM<2.1' not in s)
+        self.assertTrue('0.1<f0.f1.FWHM<2.1' in s)
+        self.assertTrue('0.1<f0.f2.FWHM<2.1' in s)
+        self.assertTrue('0.1<f0.f4.FWHM<2.1' not in s)
 
-        self.assertTrue('f1.f2.FWHM > 12.1' not in s)
-        self.assertTrue('f1.f3.FWHM > 12.1' in s)
-        self.assertTrue('f1.f4.FWHM > 12.1' in s)
-        self.assertTrue('f1.f5.FWHM > 12.1' in s)
-        self.assertTrue('f1.f6.FWHM > 12.1' not in s)
+        self.assertTrue('12.1<f1.f2.FWHM' not in s)
+        self.assertTrue('12.1<f1.f3.FWHM' in s)
+        self.assertTrue('12.1<f1.f4.FWHM' in s)
+        self.assertTrue('12.1<f1.f5.FWHM' in s)
+        self.assertTrue('12.1<f1.f6.FWHM' not in s)
 
         # Test that ties and constraints are correctly defined
         fun = FunctionFactory.createInitialized(s)
@@ -996,91 +1122,82 @@ class CrystalFieldFitTest(unittest.TestCase):
         cf2 = CrystalField('Pr', 'C2v', **params)
         cf = cf1 + cf2
 
-        cf1.setPeaks('Lorentzian')
-        cf1.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                         background=Function('FlatBackground', A0=1.0))
+        cf1.PeakShape = 'Lorentzian'
+        cf1.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
+                                    background=Function('FlatBackground', A0=1.0))
         cf1.constraints('IntensityScaling0 > 0', '0 < IntensityScaling1 < 2', 'B22 < 4')
         cf1.background[0].peak.ties(Height=10.1)
         cf1.background[0].peak.constraints('Sigma > 0.1')
         cf1.background[1].peak.ties(Height=20.2)
         cf1.background[1].peak.constraints('Sigma > 0.2')
 
-        cf1.peaks[1].ties('f2.FWHM=2*f1.FWHM', 'f3.FWHM=2*f2.FWHM')
+        cf1.peaks[1].ties({'f2.FWHM': '2*f1.FWHM', 'f3.FWHM': '2*f2.FWHM'})
         cf1.peaks[0].constraints('f1.FWHM < 2.2')
         cf1.peaks[1].constraints('f1.FWHM > 1.1', '1 < f4.FWHM < 2.2')
 
-        cf2.setPeaks('Gaussian')
-        cf2.setBackground(peak=Function('Lorentzian', Amplitude=8, FWHM=0.33),
-                         background=Function('FlatBackground', A0=1.0))
+        cf2.PeakShape = 'Gaussian'
+        cf2.background = Background(peak=Function('Lorentzian', Amplitude=8, FWHM=0.33),
+                                    background=Function('FlatBackground', A0=1.0))
         cf2.background[0].peak.ties(Amplitude=8.1)
         cf2.background[0].peak.constraints('FWHM > 0.1')
         cf2.background[1].peak.ties(Amplitude=16.2)
         cf2.background[1].peak.constraints('FWHM > 0.2')
-        cf2.peaks[1].ties('f2.Sigma=2*f1.Sigma', 'f3.Sigma=2*f2.Sigma')
+        cf2.peaks[1].ties({'f2.Sigma': '2*f1.Sigma', 'f3.Sigma': '2*f2.Sigma'})
         cf2.peaks[0].constraints('f1.Sigma < 2.2')
         cf2.peaks[1].constraints('f1.Sigma > 1.1', '1 < f4.Sigma < 2.2')
 
         s = cf.makeMultiSpectrumFunction()
 
-        self.assertTrue('IntensityScaling0 > 0' in s)
-        self.assertTrue('IntensityScaling1 < 2' in s)
+        self.assertTrue('0<IntensityScaling0' in s)
+        self.assertTrue('IntensityScaling1<2' in s)
         self.assertTrue('f0.f0.f0.Height=10.1' in s)
         self.assertTrue('f1.f0.f0.Height=20.2' in s)
-        self.assertTrue('f0.f0.f0.Sigma > 0.1' in s)
-        self.assertTrue('f1.f0.f0.Sigma > 0.2' in s)
-        self.assertTrue('f0.f1.FWHM < 2.2' in s)
-        self.assertTrue('f1.f1.FWHM > 1.1' in s)
-        self.assertTrue('1 < f1.f4.FWHM < 2.2' in s)
+        self.assertTrue('0.1<f0.f0.f0.Sigma' in s)
+        self.assertTrue('0.2<f1.f0.f0.Sigma' in s)
+        self.assertTrue('f0.f1.FWHM<2.2' in s)
+        self.assertTrue('1.1<f1.f1.FWHM' in s)
+        self.assertTrue('1<f1.f4.FWHM<2.2' in s)
         self.assertTrue('f1.f2.FWHM=2*f1.f1.FWHM' in s)
         self.assertTrue('f1.f3.FWHM=2*f1.f2.FWHM' in s)
 
         self.assertTrue('f0.f0.f0.Amplitude=8.1' in s)
         self.assertTrue('f1.f0.f0.Amplitude=16.2' in s)
-        self.assertTrue('f0.f0.f0.FWHM > 0.1' in s)
-        self.assertTrue('f1.f0.f0.FWHM > 0.2' in s)
+        self.assertTrue('0.1<f0.f0.f0.FWHM' in s)
+        self.assertTrue('0.2<f1.f0.f0.FWHM' in s)
         self.assertTrue('f1.f2.Sigma=2*f1.f1.Sigma' in s)
         self.assertTrue('f1.f3.Sigma=2*f1.f2.Sigma' in s)
-        self.assertTrue('f0.f1.Sigma < 2.2' in s)
-        self.assertTrue('f1.f1.Sigma > 1.1' in s)
-        self.assertTrue('1 < f1.f4.Sigma < 2.2' in s)
+        self.assertTrue('f0.f1.Sigma<2.2' in s)
+        self.assertTrue('1.1<f1.f1.Sigma' in s)
+        self.assertTrue('1<f1.f4.Sigma<2.2' in s)
 
         fun = FunctionFactory.createInitialized(s)
 
     def test_bad_input(self):
         from CrystalField import CrystalField
-        from mantid.simpleapi import FunctionFactory
-
-        cf = CrystalField('Ce', 'C2v', B20='aaa', B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
-                      Temperature=44.0, FWHM=1.0)
-        s = cf.makeSpectrumFunction()
-        self.assertRaises(RuntimeError, FunctionFactory.createInitialized, s)
 
-        cf = CrystalField('Ce', 'C2v', B20=1, B22=3.97, B40=[-0.0317], B42=-0.116, B44=-0.12,
+        self.assertRaises(Exception, CrystalField, 'Ce', 'C2v', B20='aaa', B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                           Temperature=44.0, FWHM=1.0)
-        s = cf.makeSpectrumFunction()
-        self.assertRaises(RuntimeError, FunctionFactory.createInitialized, s)
 
-        cf = CrystalField('Ce', 'C2v', B20=1, B22=3.97, B40=np.array([-0.0317]), B42=-0.116, B44=-0.12,
+        self.assertRaises(Exception, CrystalField, 'Ce', 'C2v', B20=1, B22=3.97, B40=[-0.0317], B42=-0.116, B44=-0.12,
                           Temperature=44.0, FWHM=1.0)
-        s = cf.makeSpectrumFunction()
-        self.assertRaises(RuntimeError, FunctionFactory.createInitialized, s)
 
-        cf = CrystalField('Ce', 'C2v', B20=1, B22=3.97, B40=np.array([1.2, 2.3]), B42=-0.116, B44=-0.12,
-                          Temperature=44.0, FWHM=1.0)
-        s = cf.makeSpectrumFunction()
-        self.assertRaises(RuntimeError, FunctionFactory.createInitialized, s)
+        self.assertRaises(Exception, CrystalField, 'Ce', 'C2v', B20=1, B22=3.97, B40=np.array([-0.0317]), B42=-0.116,
+                          B44=-0.12, Temperature=44.0, FWHM=1.0)
+
+        self.assertRaises(Exception, CrystalField, 'Ce', 'C2v', B20=1, B22=3.97, B40=np.array([1.2, 2.3]), B42=-0.116,
+                          B44=-0.12, Temperature=44.0, FWHM=1.0)
 
         cf = CrystalField('Ce', 'C2v', B20=1, B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                           Temperature=44.0, FWHM=1.0)
-        cf.peaks.param[1]["FWHM"] = 'aaa'
-        s = cf.makeSpectrumFunction()
-        self.assertRaises(RuntimeError, FunctionFactory.createInitialized, s)
+
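+        # Assigning a non-numeric peak parameter is now expected to raise immediately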
+        def set_peak_parameter():
+            cf.peaks.param[1]["FWHM"] = 'aaa'
+        self.assertRaises(Exception, set_peak_parameter)
 
     def test_resolution_single_spectrum(self):
         from CrystalField import CrystalField
         cf = CrystalField('Ce', 'C2v', B20=0.37, B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                       Temperature=44.0, FWHM=1.0, ResolutionModel=([0, 50], [1, 2]))
-        sp = cf.getSpectrum()
         self.assertAlmostEqual(cf.peaks.param[0]['FWHM'], 1.0, 8)
         self.assertAlmostEqual(cf.peaks.param[1]['FWHM'], 1.58101468, 8)
         self.assertAlmostEqual(cf.peaks.param[2]['FWHM'], 1.884945866, 8)
@@ -1219,53 +1336,54 @@ class CrystalFieldFitTest(unittest.TestCase):
 
     def test_ResolutionModel_set_multi(self):
         from CrystalField import ResolutionModel, CrystalField, CrystalFieldFit
-        from mantid.simpleapi import FunctionFactory
 
         x0 = [0, 50]
         y0 = [1, 2]
-        x1 = [0, 50]
+        x1 = [0, 51]
         y1 = [3, 4]
         rm = ResolutionModel([(x0, y0), (x1, y1)])
 
         cf = CrystalField('Ce', 'C2v', B20=0.37, B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                       Temperature=[44.0, 50], ResolutionModel=rm)
 
-        sp = cf.makeSpectrumFunction(0)
-        fun = FunctionFactory.createInitialized(sp)
-        self.assertTrue('FWHMX=(0, 50),FWHMY=(1, 2)' in sp)
-
-        sp = cf.makeSpectrumFunction(1)
-        fun = FunctionFactory.createInitialized(sp)
-        self.assertTrue('FWHMX=(0, 50),FWHMY=(3, 4)' in sp)
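+        # The resolution model values are now exposed as FWHMX<n>/FWHMY<n> attributes on the function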
+        att = cf.function.getAttributeValue('FWHMX0')
+        self.assertEqual(att[0], 0)
+        self.assertEqual(att[1], 50)
+        att = cf.function.getAttributeValue('FWHMY0')
+        self.assertEqual(att[0], 1)
+        self.assertEqual(att[1], 2)
+        att = cf.function.getAttributeValue('FWHMX1')
+        self.assertEqual(att[0], 0)
+        self.assertEqual(att[1], 51)
+        att = cf.function.getAttributeValue('FWHMY1')
+        self.assertEqual(att[0], 3)
+        self.assertEqual(att[1], 4)
 
     def test_ResolutionModel_set_multi_variation(self):
         from CrystalField import ResolutionModel, CrystalField, CrystalFieldFit
-        from mantid.simpleapi import FunctionFactory
 
         x0 = [0, 50]
         y0 = [1, 2]
-        x1 = [0, 50]
+        x1 = [1, 51]
         y1 = [3, 4]
         rm = ResolutionModel([(x0, y0), (x1, y1)])
 
         cf = CrystalField('Ce', 'C2v', B20=0.37, B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
-                      Temperature=[44.0, 50], ResolutionModel=rm,FWHMVariation=0.1)
-
-        sp = cf.makeSpectrumFunction(0)
-        fun = FunctionFactory.createInitialized(sp)
-        self.assertTrue('FWHMX=(0, 50),FWHMY=(1, 2)' in sp)
-        self.assertTrue('FWHMVariation=0.1' in sp)
-
-        sp = cf.makeSpectrumFunction(1)
-        fun = FunctionFactory.createInitialized(sp)
-        self.assertTrue('FWHMX=(0, 50),FWHMY=(3, 4)' in sp)
-        self.assertTrue('FWHMVariation=0.1' in sp)
-
-        sp = cf.makeMultiSpectrumFunction()
-        fun = FunctionFactory.createInitialized(sp)
-        self.assertTrue('FWHMX0=(0, 50),FWHMY0=(1, 2)' in sp)
-        self.assertTrue('FWHMX1=(0, 50),FWHMY1=(3, 4)' in sp)
-        self.assertTrue('FWHMVariation=0.1' in sp)
+                      Temperature=[44.0, 50], ResolutionModel=rm, FWHMVariation=0.1)
+
+        att = cf.function.getAttributeValue('FWHMX0')
+        self.assertEqual(att[0], 0)
+        self.assertEqual(att[1], 50)
+        att = cf.function.getAttributeValue('FWHMY0')
+        self.assertEqual(att[0], 1)
+        self.assertEqual(att[1], 2)
+        att = cf.function.getAttributeValue('FWHMX1')
+        self.assertEqual(att[0], 1)
+        self.assertEqual(att[1], 51)
+        att = cf.function.getAttributeValue('FWHMY1')
+        self.assertEqual(att[0], 3)
+        self.assertEqual(att[1], 4)
+        self.assertEqual(cf.FWHMVariation, 0.1)
 
     def test_peak_width_update(self):
         from CrystalField import ResolutionModel, CrystalField
@@ -1276,15 +1394,12 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf1 = CrystalField('Ce', 'C2v', B20=0.37, B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                            Temperature=44.0, FWHM=1.0, ResolutionModel=rm, FWHMVariation=0.01)
-        sp1 = cf1.getSpectrum()
 
         cf2 = CrystalField('Ce', 'C2v', B20=0.57, B22=2.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                            Temperature=44.0, FWHM=1.0, ResolutionModel=rm, FWHMVariation=0.01)
-        sp2 = cf2.getSpectrum()
 
         cf1['B20'] = 0.57
         cf1['B22'] = 2.97
-        sp1 = cf1.getSpectrum()
         self.assertEqual(cf1.peaks.param[1]['Amplitude'], cf2.peaks.param[1]['Amplitude'],)
         self.assertEqual(cf1.peaks.param[1]['FWHM'], cf2.peaks.param[1]['FWHM'],)
 
@@ -1573,11 +1688,11 @@ class CrystalFieldFitTest(unittest.TestCase):
         out0 = out[0].readY(1)
         out1 = out[1].readY(1)
 
-        self.assertTrue(np.all(out0 / y0 > 2.49999999999))
-        self.assertTrue(np.all(out0 / y0 < 2.50000000001))
+        self.assertTrue(np.all(out0 / y0 > 2.49))
+        self.assertTrue(np.all(out0 / y0 < 2.51))
 
-        self.assertTrue(np.all(out1 / y1 > 1.49999999999))
-        self.assertTrue(np.all(out1 / y1 < 1.50000000001))
+        self.assertTrue(np.all(out1 / y1 > 1.49))
+        self.assertTrue(np.all(out1 / y1 < 1.51))
 
     def test_CrystalField_PointCharge_ligand(self):
         from CrystalField import PointCharge
diff --git a/scripts/test/ISISPowderCommonTest.py b/scripts/test/ISISPowderCommonTest.py
index 905fff5ff20e682618ba7b9cd9d773cd97005c60..c1ad2cea4e3dc4561d6cd253ef8405c8e4f62171 100644
--- a/scripts/test/ISISPowderCommonTest.py
+++ b/scripts/test/ISISPowderCommonTest.py
@@ -5,7 +5,7 @@ import unittest
 
 from six_shim import assertRaisesRegex
 
-from isis_powder.routines import common
+from isis_powder.routines import common, common_enums
 
 
 class ISISPowderCommonTest(unittest.TestCase):
@@ -66,12 +66,14 @@ class ISISPowderCommonTest(unittest.TestCase):
             common.crop_banks_using_crop_list(bank_list=bank_list[1:], crop_values_list=cropping_value_list)
 
         # Check we can crop a single workspace from the list
-        cropped_single_ws_list = common.crop_banks_using_crop_list(bank_list=[bank_list[0]], crop_values_list=[cropping_value])
+        cropped_single_ws_list = common.crop_banks_using_crop_list(bank_list=[bank_list[0]],
+                                                                   crop_values_list=[cropping_value])
         self.assertEqual(cropped_single_ws_list[0].blocksize(), expected_number_of_bins)
         mantid.DeleteWorkspace(Workspace=cropped_single_ws_list[0])
 
         # Check we can crop a whole list
-        cropped_ws_list = common.crop_banks_using_crop_list(bank_list=bank_list[1:], crop_values_list=cropping_value_list[1:])
+        cropped_ws_list = common.crop_banks_using_crop_list(bank_list=bank_list[1:],
+                                                            crop_values_list=cropping_value_list[1:])
         for ws in cropped_ws_list[1:]:
             self.assertEqual(ws.blocksize(), expected_number_of_bins)
             mantid.DeleteWorkspace(Workspace=ws)
@@ -160,7 +162,7 @@ class ISISPowderCommonTest(unittest.TestCase):
 
     def test_extract_ws_spectra(self):
         number_of_expected_banks = 5
-        ws_to_split = mantid.CreateSampleWorkspace(XMin=0, XMax=1, BankPixelWidth=1,
+        ws_to_split = mantid.CreateSampleWorkspace(XMin=0, XMax=2, BankPixelWidth=1,
                                                    NumBanks=number_of_expected_banks)
         input_name = ws_to_split.getName()
 
@@ -242,19 +244,195 @@ class ISISPowderCommonTest(unittest.TestCase):
         with assertRaisesRegex(self, ValueError, run_input_sting):
             common.generate_run_numbers(run_number_string=run_input_sting)
 
+    def test_load_current_normalised_workspace(self):
+        run_number_single = 100
+        run_number_range = "100-101"
+
+        bin_index = 8
+        first_run_bin_value = 0.59706224
+        second_run_bin_value = 1.48682782
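+        # Expected values at bin_index for the POL100 and POL101 test runs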
+
+        # Check it handles a single workspace correctly
+        single_workspace = common.load_current_normalised_ws_list(run_number_string=run_number_single,
+                                                                  instrument=ISISPowderMockInst())
+        # Get the only workspace in the list and check spectrum 0 at the chosen bin index
+        self.assertTrue(isinstance(single_workspace, list))
+        self.assertEqual(len(single_workspace), 1)
+        self.assertAlmostEqual(single_workspace[0].readY(0)[bin_index], first_run_bin_value)
+        mantid.DeleteWorkspace(single_workspace[0])
+
+        # Does it return multiple workspaces when instructed
+        multiple_ws = common.load_current_normalised_ws_list(
+            run_number_string=run_number_range, instrument=ISISPowderMockInst(),
+            input_batching=common_enums.INPUT_BATCHING.Individual)
+
+        self.assertTrue(isinstance(multiple_ws, list))
+        self.assertEqual(len(multiple_ws), 2)
+
+        # Check the bins haven't been summed
+        self.assertAlmostEqual(multiple_ws[0].readY(0)[bin_index], first_run_bin_value)
+        self.assertAlmostEqual(multiple_ws[1].readY(0)[bin_index], second_run_bin_value)
+        for ws in multiple_ws:
+            mantid.DeleteWorkspace(ws)
+
+        # Does it sum workspaces when instructed
+        summed_ws = common.load_current_normalised_ws_list(
+            run_number_string=run_number_range, instrument=ISISPowderMockInst(),
+            input_batching=common_enums.INPUT_BATCHING.Summed)
+
+        self.assertTrue(isinstance(summed_ws, list))
+        self.assertEqual(len(summed_ws), 1)
+
+        # Check bins have been summed
+        self.assertAlmostEqual(summed_ws[0].readY(0)[bin_index], (first_run_bin_value + second_run_bin_value))
+        mantid.DeleteWorkspace(summed_ws[0])
+
+    def test_load_current_normalised_ws_respects_ext(self):
+        run_number = "100"
+        file_ext_one = ".s1"
+        file_ext_two = ".s2"
+
+        bin_index = 5
+
+        result_ext_one = 1.25270032
+        result_ext_two = 1.15126361
+
+        # Check that it respects the ext flag - try the first extension of this name
+        returned_ws_one = common.load_current_normalised_ws_list(instrument=ISISPowderMockInst(file_ext=file_ext_one),
+                                                                 run_number_string=run_number)
+        # Store the result and delete the workspace: both loads share the same name, so the second would overwrite the first
+        result_ws_one = returned_ws_one[0].readY(0)[bin_index]
+        mantid.DeleteWorkspace(returned_ws_one[0])
+
+        returned_ws_two = common.load_current_normalised_ws_list(instrument=ISISPowderMockInst(file_ext=file_ext_two),
+                                                                 run_number_string=run_number)
+        result_ws_two = returned_ws_two[0].readY(0)[bin_index]
+        mantid.DeleteWorkspace(returned_ws_two[0])
+
+        # Ensure it loaded two different workspaces
+        self.assertAlmostEqual(result_ws_one, result_ext_one)
+        self.assertAlmostEqual(result_ws_two, result_ext_two)
+        self.assertNotAlmostEqual(result_ext_one, result_ext_two)
+
+    def test_rebin_bin_boundary_defaults(self):
+        ws = mantid.CreateSampleWorkspace(OutputWorkspace='test_rebin_bin_boundary_default',
+                                          Function='Flat background', NumBanks=1, BankPixelWidth=1, XMax=10, BinWidth=1)
+        new_bin_width = 0.5
+        # The bins were originally 1 unit wide, so rebinning to a width of 0.5 should give back twice as many bins
+        original_number_bins = ws.getNumberBins()
+        original_first_x_val = ws.readX(0)[0]
+        original_last_x_val = ws.readX(0)[-1]
+
+        expected_bins = original_number_bins * 2
+
+        ws = common.rebin_workspace(workspace=ws, new_bin_width=new_bin_width)
+        self.assertEqual(ws.getNumberBins(), expected_bins)
+
+        # Check bin boundaries were preserved
+        self.assertEqual(ws.readX(0)[0], original_first_x_val)
+        self.assertEqual(ws.readX(0)[-1], original_last_x_val)
+
+        mantid.DeleteWorkspace(ws)
+
+    def test_rebin_bin_boundary_specified(self):
+        ws = mantid.CreateSampleWorkspace(OutputWorkspace='test_rebin_bin_boundary_specified',
+                                          Function='Flat background', NumBanks=1, BankPixelWidth=1, XMax=10, BinWidth=1)
+        # Originally we had 10 bins from 0 to 10. Rebinning from 1 to 6 with a bin width of 0.5 should give the same
+        # number of output bins with different boundaries
+        new_bin_width = 0.5
+        original_number_bins = ws.getNumberBins()
+
+        expected_start_x = 1
+        expected_end_x = 6
+
+        ws = common.rebin_workspace(workspace=ws, new_bin_width=new_bin_width,
+                                    start_x=expected_start_x, end_x=expected_end_x)
+
+        # The bin width and the x-range were both halved, so the number of bins should be unchanged
+        self.assertEqual(ws.getNumberBins(), original_number_bins)
+
+        # Check bin boundaries were changed
+        self.assertEqual(ws.readX(0)[0], expected_start_x)
+        self.assertEqual(ws.readX(0)[-1], expected_end_x)
+
+        mantid.DeleteWorkspace(ws)
+
+    def test_rebin_workspace_list_defaults(self):
+        new_bin_width = 0.5
+        number_of_ws = 10
+
+        ws_bin_widths = [new_bin_width] * number_of_ws
+        ws_list = []
+        for i in range(number_of_ws):
+            out_name = "test_rebin_workspace_list_defaults_" + str(i)
+            ws_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, Function='Flat background',
+                                                        NumBanks=1, BankPixelWidth=1, XMax=10, BinWidth=1))
+        # What if the item passed in is not a list
+        err_msg_not_list = "was not a list"
+        with assertRaisesRegex(self, RuntimeError, err_msg_not_list):
+            common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=None)
+
+        with assertRaisesRegex(self, RuntimeError, err_msg_not_list):
+            common.rebin_workspace_list(workspace_list=None, bin_width_list=[])
+
+        # What if the lists aren't the same length
+        with assertRaisesRegex(self, ValueError, "does not match the number of banks"):
+            incorrect_number_bin_widths = [1] * (number_of_ws - 1)
+            common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=incorrect_number_bin_widths)
+
+        # Does it return all the workspaces as a list - another unit test checks the implementation
+        output = common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=ws_bin_widths)
+        self.assertEqual(len(output), number_of_ws)
+
+        for ws in output:
+            mantid.DeleteWorkspace(ws)
+
+    def test_rebin_workspace_list_x_start_end(self):
+        new_start_x = 1
+        new_end_x = 5
+        new_bin_width = 0.5
+        number_of_ws = 10
+
+        ws_bin_widths = [new_bin_width] * number_of_ws
+        start_x_list = [new_start_x] * number_of_ws
+        end_x_list = [new_end_x] * number_of_ws
+
+        ws_list = []
+        for i in range(number_of_ws):
+            out_name = "test_rebin_workspace_list_defaults_" + str(i)
+            ws_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, Function='Flat background',
+                                                        NumBanks=1, BankPixelWidth=1, XMax=10, BinWidth=1))
+
+        # Are the lengths checked
+        incorrect_length = [1] * (number_of_ws - 1)
+        with assertRaisesRegex(self, ValueError, "The number of starting bin values"):
+            common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=ws_bin_widths,
+                                        start_x_list=incorrect_length, end_x_list=end_x_list)
+        with assertRaisesRegex(self, ValueError, "The number of ending bin values"):
+            common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=ws_bin_widths,
+                                        start_x_list=start_x_list, end_x_list=incorrect_length)
+
+        output_list = common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=ws_bin_widths,
+                                                  start_x_list=start_x_list, end_x_list=end_x_list)
+        self.assertEqual(len(output_list), number_of_ws)
+        for ws in output_list:
+            self.assertEqual(ws.readX(0)[0], new_start_x)
+            self.assertEqual(ws.readX(0)[-1], new_end_x)
+            mantid.DeleteWorkspace(ws)
+
     def test_remove_intermediate_workspace(self):
         ws_list = []
         ws_names_list = []
 
         ws_single_name = "remove_intermediate_ws-single"
         ws_single = mantid.CreateSampleWorkspace(OutputWorkspace=ws_single_name, NumBanks=1, BankPixelWidth=1,
-                                                 XMax=2, BinWidth=1)
+                                                 XMax=10, BinWidth=1)
 
         for i in range(0, 3):
             out_name = "remove_intermediate_ws_" + str(i)
             ws_names_list.append(out_name)
             ws_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, NumBanks=1, BankPixelWidth=1,
-                                                        XMax=2, BinWidth=1))
+                                                        XMax=10, BinWidth=1))
 
         # Check single workspaces are removed
         self.assertEqual(True, mantid.mtd.doesExist(ws_single_name))
@@ -285,6 +463,31 @@ class ISISPowderCommonTest(unittest.TestCase):
         common.run_normalise_by_current(ws)
         self.assertAlmostEqual(expected_value, ws.dataY(0)[0], delta=1e-8)
 
+    def test_subtract_summed_runs(self):
+        # Load a vanadium workspace for this test
+        sample_empty_number = "100"
+        ws_file_name = "POL" + sample_empty_number
+        original_ws = mantid.Load(ws_file_name)
+        no_scale_ws = mantid.CloneWorkspace(InputWorkspace=original_ws, OutputWorkspace="test_subtract_sample_empty_ws")
+
+        # Subtracting from self should equal 0
+        returned_ws = common.subtract_summed_runs(ws_to_correct=no_scale_ws, instrument=ISISPowderMockInst(),
+                                                  empty_sample_ws_string=sample_empty_number)
+        y_values = returned_ws.readY(0)
+        for i in range(returned_ws.blocksize()):
+            self.assertAlmostEqual(y_values[i], 0)
+
+        # Check what happens when we specify a scale factor of 0.75
+        scaled_ws = common.subtract_summed_runs(ws_to_correct=original_ws, instrument=ISISPowderMockInst(),
+                                                scale_factor=0.75, empty_sample_ws_string=sample_empty_number)
+        scaled_y_values = scaled_ws.readY(0)
+        self.assertAlmostEqual(scaled_y_values[2], 0.20257424)
+        self.assertAlmostEqual(scaled_y_values[4], 0.31700152)
+        self.assertAlmostEqual(scaled_y_values[7], 0.35193970)
+
+        mantid.DeleteWorkspace(returned_ws)
+        mantid.DeleteWorkspace(scaled_ws)
+
     def test_spline_workspaces(self):
         ws_list = []
         for i in range(1, 4):
@@ -303,5 +506,31 @@ class ISISPowderCommonTest(unittest.TestCase):
             mantid.DeleteWorkspace(splined_ws)
 
 
+class ISISPowderMockInst(object):
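+    # Minimal stand-in for an ISIS powder instrument, providing only the hooks used by the
+    # common.load_current_normalised_ws_list and common.subtract_summed_runs tests above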
+    def __init__(self, file_ext=None):
+        self._file_ext = file_ext
+
+    @staticmethod
+    def _get_input_batching_mode(**_):
+        # Default to individual (unsummed) workspaces, as that makes it easier to spot when something goes wrong
+        return common_enums.INPUT_BATCHING.Individual
+
+    def _get_run_details(self, **_):
+        return ISISPowderMockRunDetails(file_ext=self._file_ext)
+
+    @staticmethod
+    def _generate_input_file_name(run_number):
+        # Mantid will automatically convert this into either POL or POLARIS
+        return "POL" + str(run_number)
+
+    @staticmethod
+    def _normalise_ws_current(ws_to_correct, **_):
+        return ws_to_correct
+
+
+class ISISPowderMockRunDetails(object):
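+    # Bare-bones run-details object; only the file_extension attribute is needed by the mock instrument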
+    def __init__(self, file_ext):
+        self.file_extension = file_ext
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/scripts/test/ISISPowderRunDetailsTest.py b/scripts/test/ISISPowderRunDetailsTest.py
index 9f2319f54e15f4193bd150dd64273ca2207732e8..305fd52c7c0d987787219b391996522f41e68ce9 100644
--- a/scripts/test/ISISPowderRunDetailsTest.py
+++ b/scripts/test/ISISPowderRunDetailsTest.py
@@ -14,13 +14,15 @@ from isis_powder.routines import run_details
 class ISISPowderInstrumentRunDetailsTest(unittest.TestCase):
     def setup_mock_inst_settings(self, yaml_file_path):
         calibration_dir = tempfile.mkdtemp()
+        # Keep track of list of folders to remove
         self._folders_to_remove = [calibration_dir]
 
+        # Check the required unit test files could be found
         test_configuration_path = mantid.api.FileFinder.getFullPath(yaml_file_path)
         if not test_configuration_path or len(test_configuration_path) <= 0:
             self.fail("Could not find the unit test input file called: " + str(yaml_file_path))
-        mock_inst = MockInstSettings(cal_file_path=test_configuration_path, calibration_dir=calibration_dir)
-        return mock_inst
+
+        return MockInstSettings(cal_file_path=test_configuration_path, calibration_dir=calibration_dir)
 
     def tearDown(self):
         for folder in self._folders_to_remove:
@@ -44,6 +46,9 @@ class ISISPowderInstrumentRunDetailsTest(unittest.TestCase):
         self.assertEqual(output_obj.empty_runs, expected_empty_runs)
         self.assertEqual(output_obj.grouping_file_path,
                          os.path.join(mock_inst.calibration_dir, mock_inst.grouping_file_name))
+        expected_file_ext = mock_inst.file_extension
+        expected_file_ext = expected_file_ext if expected_file_ext.startswith('.') else '.' + expected_file_ext
+        self.assertEqual(output_obj.file_extension, expected_file_ext)
         self.assertEqual(output_obj.label, expected_label)
         self.assertEqual(output_obj.offset_file_path,
                          os.path.join(mock_inst.calibration_dir, expected_label, expected_offset_file_name))
@@ -117,6 +122,7 @@ class MockInstSettings(object):
         self.calibration_dir = calibration_dir
         self.cal_mapping_path = cal_file_path
         self.grouping_file_name = MockInstSettings.gen_random_string()
+        self.file_extension = MockInstSettings.gen_random_string()
 
     @staticmethod
     def gen_random_string():
diff --git a/scripts/test/ISISPowderYamlParserTest.py b/scripts/test/ISISPowderYamlParserTest.py
index 78069332c53178b4bd5bd53c1bbbf401b611dce6..61a99572e40854e8a6bc34066ad2ceb2e3081ec0 100644
--- a/scripts/test/ISISPowderYamlParserTest.py
+++ b/scripts/test/ISISPowderYamlParserTest.py
@@ -115,15 +115,15 @@ class ISISPowderYamlParserTest(unittest.TestCase):
         file_handle.close()
 
         # Test a value in the middle of 1-10
-        with assertRaisesRegex(self, ValueError, "Run number 5 not recognised in calibration mapping"):
+        with assertRaisesRegex(self, ValueError, "Run number 5 not recognised in cycle mapping file"):
             yaml_parser.get_run_dictionary(run_number_string="5", file_path=file_path)
 
         # Check on edge of invalid numbers
-        with assertRaisesRegex(self, ValueError, "Run number 9 not recognised in calibration mapping"):
+        with assertRaisesRegex(self, ValueError, "Run number 9 not recognised in cycle mapping file"):
             yaml_parser.get_run_dictionary(run_number_string=9, file_path=file_path)
 
         # What about a range of numbers
-        with assertRaisesRegex(self, ValueError, "Run number 2 not recognised in calibration mapping"):
+        with assertRaisesRegex(self, ValueError, "Run number 2 not recognised in cycle mapping file"):
             yaml_parser.get_run_dictionary(run_number_string="2-8", file_path=file_path)
 
         # Check valid number still works
diff --git a/scripts/test/SANS/CMakeLists.txt b/scripts/test/SANS/CMakeLists.txt
index 5d5c749db332d426e2c53f150ca9d531c813b7e2..5982d712d802e829a58ec14882f432892a584d6d 100644
--- a/scripts/test/SANS/CMakeLists.txt
+++ b/scripts/test/SANS/CMakeLists.txt
@@ -1,4 +1,5 @@
 add_subdirectory(algorithm_detail)
+add_subdirectory(command_interface)
 add_subdirectory(common)
 add_subdirectory(state)
 add_subdirectory(user_file)
diff --git a/scripts/test/SANS/algorithm_detail/CMakeLists.txt b/scripts/test/SANS/algorithm_detail/CMakeLists.txt
index 15aee543d62c8c80f48736ed1b941b869ae5f646..b1338d36883949399575d893faaafb87c79b9479 100644
--- a/scripts/test/SANS/algorithm_detail/CMakeLists.txt
+++ b/scripts/test/SANS/algorithm_detail/CMakeLists.txt
@@ -3,8 +3,11 @@
 ##
 
 set ( TEST_PY_FILES
-  scale_helper_test.py
+  calculate_transmission_helper_test.py
+  merge_reductions_test.py
   q_resolution_calculator_test.py
+  scale_helper_test.py
+  strip_end_nans_test.py
 )
 
 check_tests_valid ( ${CMAKE_CURRENT_SOURCE_DIR} ${TEST_PY_FILES} )
diff --git a/scripts/test/SANS/algorithm_detail/calculate_transmission_helper_test.py b/scripts/test/SANS/algorithm_detail/calculate_transmission_helper_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..796e6a9e5cdf2e93ce047b3aef7a543ff5354d7d
--- /dev/null
+++ b/scripts/test/SANS/algorithm_detail/calculate_transmission_helper_test.py
@@ -0,0 +1,232 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+import os
+from mantid.kernel import config
+from mantid.simpleapi import (CreateSampleWorkspace, MaskDetectors, DeleteWorkspace, LoadNexusProcessed, Load, Rebin)
+from mantid.api import AnalysisDataService
+from sans.algorithm_detail.calculate_transmission_helper import (get_masked_det_ids,
+                                                                 get_idf_path_from_workspace,
+                                                                 get_workspace_indices_for_monitors,
+                                                                 apply_flat_background_correction_to_monitors,
+                                                                 apply_flat_background_correction_to_detectors,
+                                                                 get_region_of_interest)
+
+
+class CalculateTransmissionHelperTest(unittest.TestCase):
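+    # Shared fixtures: the test workspaces and the ROI/mask XML files are created once in setUpClass and
+    # cleaned up again in tearDownClass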
+    immutable_test_workspace = None
+    region_of_interest_workspace = None
+    roi_file = "roi_file_for_sans_transmission.xml"
+    mask_file = "mas_file_for_sans_transmission.xml"
+    roi_file_path = None
+    mask_file_path = None
+
+    def _assert_collection_elements_are_equal(self, collection1, collection2):
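+        # Element-wise comparison of two sequences using an absolute tolerance of 1e-7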
+        tolerance = 1e-7
+        self.assertTrue(len(collection1) == len(collection2))
+        for index in range(len(collection1)):
+            self.assertTrue(abs(collection1[index] - collection2[index]) < tolerance)
+
+    @staticmethod
+    def _create_flat_background_test_workspace(workspace_name):
+        LoadNexusProcessed(Filename="LOQ48127", OutputWorkspace=workspace_name)
+        workspace = AnalysisDataService.retrieve(workspace_name)
+        # Rebin to only have four values at 11, 31, 51, 70.5
+        workspace = Rebin(workspace, "1,20,80")
+        # For each spectrum we set the first two entries to 2 and the other two entries to 4.
+        for index in range(workspace.getNumberHistograms()):
+            data_y = workspace.dataY(index)
+            data_y[0] = 2.
+            data_y[1] = 2.
+            data_y[2] = 4.
+            data_y[3] = 4.
+        return workspace
+
+    @staticmethod
+    def _get_path(file_name):
+        save_directory = config['defaultsave.directory']
+        if not os.path.isdir(save_directory):
+            save_directory = os.getcwd()
+        return os.path.join(save_directory, file_name)
+
+    @staticmethod
+    def _save_file(file_path, content):
+        with open(file_path, "w") as f:
+            f.write(content)
+
+    @classmethod
+    def setUpClass(cls):
+        # A small workspace for general tests
+        test_workspace = LoadNexusProcessed(Filename="LOQ48127")
+        cls.immutable_test_workspace = test_workspace
+
+        # A full workspace on which we can test region of interest selection
+        region_of_interest_workspace = Load(Filename="LOQ74044")
+        cls.region_of_interest_workspace = region_of_interest_workspace
+
+        # A region of interest xml file
+        roi_content = ("<?xml version=\"1.0\"?>\n"
+                       "\t<detector-masking>\n"
+                       "\t\t<group>\n"
+                       "\t\t\t<detids>6990-6996</detids>\n"
+                       "\t\t</group>\n"
+                       "\t</detector-masking>\n")
+        cls.roi_file_path = cls._get_path(cls.roi_file)
+        cls._save_file(cls.roi_file_path, roi_content)
+
+        # A mask file
+        mask_content = ("<?xml version=\"1.0\"?>\n"
+                        "\t<detector-masking>\n"
+                        "\t\t<group>\n"
+                        "\t\t\t<detids>6991</detids>\n"
+                        "\t\t</group>\n"
+                        "\t</detector-masking>\n")
+        cls.mask_file_path = cls._get_path(cls.mask_file)
+        cls._save_file(cls.mask_file_path, mask_content)
+
+    @classmethod
+    def tearDownClass(cls):
+        if cls.immutable_test_workspace:
+            DeleteWorkspace(cls.immutable_test_workspace)
+        if cls.region_of_interest_workspace:
+            DeleteWorkspace(cls.region_of_interest_workspace)
+        if os.path.exists(cls.roi_file_path):
+            os.remove(cls.roi_file_path)
+        if os.path.exists(cls.mask_file_path):
+            os.remove(cls.mask_file_path)
+
+    def test_get_masked_det_ids(self):
+        # Arrange
+        test_workspace_for_masked_det_ids = CreateSampleWorkspace("Histogram")
+        MaskDetectors(Workspace=test_workspace_for_masked_det_ids, DetectorList=[100, 102, 104])
+
+        # Act
+        masked_det_ids = list(get_masked_det_ids(test_workspace_for_masked_det_ids))
+
+        # Assert
+        self.assertTrue(100 in masked_det_ids)
+        self.assertTrue(102 in masked_det_ids)
+        self.assertTrue(104 in masked_det_ids)
+        self.assertEqual(len(masked_det_ids), 3)
+
+        # Clean up
+        DeleteWorkspace(test_workspace_for_masked_det_ids)
+
+    def test_that_gets_idf_from_workspace(self):
+        # Act
+        idf_path = get_idf_path_from_workspace(self.immutable_test_workspace)
+        # Assert
+        self.assertTrue(os.path.exists(idf_path))
+        self.assertTrue(os.path.basename(idf_path) == "LOQ_Definition_20020226-.xml")
+
+    def test_that_extracts_workspace_indices_of_monitor_when_monitors_are_present(self):
+        # Act
+        workspace_indices_generator = get_workspace_indices_for_monitors(self.immutable_test_workspace)
+        # Assert
+        workspace_indices = list(workspace_indices_generator)
+        self.assertTrue(len(workspace_indices) == 2)
+        self.assertTrue(workspace_indices[0] == 0)
+        self.assertTrue(workspace_indices[1] == 1)
+
+    def test_that_returns_empty_generator_if_no_monitors_are_present(self):
+        # Arrange
+        test_workspace_for_monitors = CreateSampleWorkspace("Histogram")
+        # Act
+        workspace_indices_generator = get_workspace_indices_for_monitors(test_workspace_for_monitors)
+        # Assert
+        workspace_indices = list(workspace_indices_generator)
+        self.assertTrue(workspace_indices == [])
+        # Clean up
+        DeleteWorkspace(test_workspace_for_monitors)
+
+    def test_that_applies_flat_background_correction_only_to_monitors(self):
+        # Arrange
+        workspace_name = "monitor_test_workspace"
+        workspace = self._create_flat_background_test_workspace(workspace_name)
+
+        monitor_workspace_indices = [0, 1]
+        # The first monitor (with spectrum index 1) should find a correction value of 2
+        # The second monitor (with spectrum index 2) should find a correction value of 4
+        monitor_spectrum_tof_start = {"1": 1, "2": 50}
+        monitor_spectrum_tof_stop = {"1": 40, "2": 70}
+        tof_general_start = 24
+        tof_general_stop = 38
+        # Act
+        output_workspace = apply_flat_background_correction_to_monitors(workspace,
+                                                                        monitor_workspace_indices,
+                                                                        monitor_spectrum_tof_start,
+                                                                        monitor_spectrum_tof_stop, tof_general_start,
+                                                                        tof_general_stop)
+        # Assert
+        # The first monitor should have [0, 0, 2, 2]; it has 2.1 in the last value, which is not fully understood
+        # The second monitor should have [0, 0, 0, 0]; it has 0.1 in the last value, which is not fully understood.
+        # Note that the flat background correction never goes negative.
+        self._assert_collection_elements_are_equal(output_workspace.dataY(0), [0, 0, 2, 2.1])
+        self._assert_collection_elements_are_equal(output_workspace.dataY(1), [0, 0, 0, 0.1])
+        # The detectors should be unchanged
+        for index in range(2, output_workspace.getNumberHistograms()):
+            self._assert_collection_elements_are_equal(output_workspace.dataY(index), [2, 2, 4, 4])
+
+        # Clean up
+        DeleteWorkspace(workspace)
+
+    def test_that_applies_flat_background_correction_only_to_detectors(self):
+        # Arrange
+        workspace_name = "monitor_test_workspace"
+        workspace = self._create_flat_background_test_workspace(workspace_name)
+
+        start_tof = "1"
+        stop_tof = "40"
+
+        # Act
+        output_workspace = apply_flat_background_correction_to_detectors(workspace, start_tof, stop_tof)
+
+        # Assert
+        # The monitors should not have changed
+        self._assert_collection_elements_are_equal(output_workspace.dataY(0), [2., 2., 4., 4.])
+        self._assert_collection_elements_are_equal(output_workspace.dataY(1), [2., 2., 4., 4.])
+        # The detectors should have 2 subtracted from each bin. The last value seems to be slightly off
+        for index in range(2, output_workspace.getNumberHistograms()):
+            self._assert_collection_elements_are_equal(output_workspace.dataY(index), [0., 0., 2., 2.1])
+
+        # Clean up
+        DeleteWorkspace(workspace)
+
+    def test_that_gets_region_of_interest_for_radius_only_gets_correct_ids(self):
+        # Act
+        detector_ids = get_region_of_interest(self.region_of_interest_workspace, radius=0.01)
+
+        # Assert
+        # The one centimeter radius should capture [7872, 7873, 7874, 8000, 8001, 8002, 8003, 8128, 8129, 8130]
+        expected_ids = [7872, 7873, 7874, 8000, 8001, 8002, 8003, 8128, 8129, 8130]
+        self._assert_collection_elements_are_equal(detector_ids, expected_ids)
+
+    def test_that_gets_region_of_interest_for_roi_file(self):
+        # Act
+        detector_ids = get_region_of_interest(self.region_of_interest_workspace, roi_files=[self.roi_file_path])
+        # Assert
+        expected_detector_ids = [6990, 6991, 6992, 6993, 6994, 6995, 6996]
+        self._assert_collection_elements_are_equal(detector_ids, expected_detector_ids)
+
+    def test_that_gets_region_of_interest_for_roi_mask_and_radius(self):
+        # Act
+        detector_ids = get_region_of_interest(self.region_of_interest_workspace, roi_files=[self.roi_file_path],
+                                              mask_files=[self.mask_file_path], radius=0.01)
+        # Assert
+        # From Radius: [7872, 7873, 7874, 8000, 8001, 8002, 8003, 8128, 8129, 8130]
+        # From Roi File: [6990, 6991, 6992, 6993, 6994, 6995, 6996]
+        # Mask file removes: [6991]
+        expected_detector_ids = [6990, 6992, 6993, 6994, 6995, 6996, 7872, 7873, 7874, 8000,
+                                 8001, 8002, 8003, 8128, 8129, 8130]
+        self._assert_collection_elements_are_equal(detector_ids, expected_detector_ids)
+
+    def test_that_returns_empty_list_if_nothing_is_specified(self):
+        # Act
+        detector_ids = get_region_of_interest(self.region_of_interest_workspace)
+        # Assert
+        expected_detector_ids = []
+        self._assert_collection_elements_are_equal(detector_ids, expected_detector_ids)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/scripts/test/SANS/algorithm_detail/merge_reductions_test.py b/scripts/test/SANS/algorithm_detail/merge_reductions_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..1899efca258601a3297be98f1abf7ac5996e72ee
--- /dev/null
+++ b/scripts/test/SANS/algorithm_detail/merge_reductions_test.py
@@ -0,0 +1,202 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+from sans.algorithm_detail.merge_reductions import (MergeFactory, ISIS1DMerger)
+from sans.algorithm_detail.bundles import OutputPartsBundle
+
+from sans.state.reduction_mode import StateReductionMode
+from sans.test_helper.test_director import TestDirector
+
+from sans.common.enums import (ISISReductionMode, ReductionDimensionality, FitModeForMerge)
+from sans.common.general_functions import create_unmanaged_algorithm
+from sans.common.constants import EMPTY_NAME
+from sans.common.enums import DataType
+
+
+class MergeReductionsTest(unittest.TestCase):
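+    # Drives MergeFactory/ISIS1DMerger with small hand-built 1D workspaces for each FitModeForMerge option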
+    @staticmethod
+    def create_1D_workspace(data_x, data_y):
+        create_name = "CreateWorkspace"
+        create_options = {'DataX': data_x,
+                          'DataY': data_y,
+                          'NSpec': 1,
+                          'UnitX': 'MomentumTransfer',
+                          "OutputWorkspace": EMPTY_NAME}
+        create_alg = create_unmanaged_algorithm(create_name, **create_options)
+        create_alg.execute()
+        return create_alg.getProperty('OutputWorkspace').value
+
+    @staticmethod
+    def _get_simple_state(fit_type=FitModeForMerge.NoFit, scale=1.0, shift=0.0):
+        # Set the reduction parameters
+        reduction_info = StateReductionMode()
+        reduction_info.reduction_mode = ISISReductionMode.Merged
+        reduction_info.dimensionality = ReductionDimensionality.TwoDim
+        reduction_info.merge_shift = shift
+        reduction_info.merge_scale = scale
+        reduction_info.merge_fit_mode = fit_type
+
+        # Get the sample state
+        test_director = TestDirector()
+        test_director.set_states(reduction_state=reduction_info)
+        return test_director.construct()
+
+    @staticmethod
+    def _create_workspaces(state, data_type, data_x_lab, data_y_lab_count, data_y_lab_norm,
+                           data_x_hab, data_y_hab_count, data_y_hab_norm):
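+        # Build matching count/norm workspace pairs for LAB and HAB and wrap each pair in an OutputPartsBundle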
+        lab_count = MergeReductionsTest.create_1D_workspace(data_x_lab, data_y_lab_count)
+        lab_norm = MergeReductionsTest.create_1D_workspace(data_x_lab, data_y_lab_norm)
+        lab_bundle = OutputPartsBundle(state=state, data_type=data_type, reduction_mode=ISISReductionMode.LAB,
+                                       output_workspace_count=lab_count, output_workspace_norm=lab_norm)
+
+        hab_count = MergeReductionsTest.create_1D_workspace(data_x_hab, data_y_hab_count)
+        hab_norm = MergeReductionsTest.create_1D_workspace(data_x_hab, data_y_hab_norm)
+        hab_bundle = OutputPartsBundle(state=state, data_type=data_type, reduction_mode=ISISReductionMode.HAB,
+                                       output_workspace_count=hab_count, output_workspace_norm=hab_norm)
+        return lab_bundle, hab_bundle
+
+    @staticmethod
+    def _provide_data(state):
+        # Create data for sample
+        data_x_lab = list(range(0, 10))
+        data_y_lab_count = [2.]*10
+        data_y_lab_norm = [1.] * 10
+
+        data_x_hab = list(range(0, 10))
+        data_y_hab_count = [3.] * 10
+        data_y_hab_norm = [4.] * 10
+        sample_lab, sample_hab = MergeReductionsTest._create_workspaces(state, DataType.Sample, data_x_lab,
+                                                                        data_y_lab_count, data_y_lab_norm,
+                                                                        data_x_hab, data_y_hab_count, data_y_hab_norm)
+
+        # Create data for can
+        data_x_lab = list(range(0, 10))
+        data_y_lab_count = [5.]*10
+        data_y_lab_norm = [6.] * 10
+
+        data_x_hab = list(range(0, 10))
+        data_y_hab_count = [7.] * 10
+        data_y_hab_norm = [8.] * 10
+        can_lab, can_hab = MergeReductionsTest._create_workspaces(state, DataType.Can, data_x_lab,
+                                                                  data_y_lab_count, data_y_lab_norm,
+                                                                  data_x_hab, data_y_hab_count, data_y_hab_norm)
+        return sample_lab, sample_hab, can_lab, can_hab
+
+    def test_that_correct_merger_is_generated(self):
+        # Arrange
+        state = self._get_simple_state()
+        merge_factory = MergeFactory()
+
+        # Act
+        merger = merge_factory.create_merger(state)
+
+        # Assert
+        self.assertTrue(isinstance(merger, ISIS1DMerger))
+
+    def test_that_can_merge_without_fitting(self):
+        # Arrange
+        fit_type = FitModeForMerge.NoFit
+        scale_input = 32.0
+        shift_input = 12.65
+        state = self._get_simple_state(fit_type, scale_input, shift_input)
+        merge_factory = MergeFactory()
+        merger = merge_factory.create_merger(state)
+
+        sample_lab, sample_hab, can_lab, can_hab = self._provide_data(state)
+
+        bundles = {ISISReductionMode.LAB: [sample_lab, can_lab],
+                   ISISReductionMode.HAB: [sample_hab, can_hab]}
+
+        # Act
+        result = merger.merge(bundles)
+        merged_workspace = result.merged_workspace
+
+        scale = result.scale
+        shift = result.shift
+        self.assertTrue(abs(scale - scale_input) < 1e-4)
+        self.assertTrue(abs(shift - shift_input) < 1e-4)
+
+        # There is an overlap of two bins between HAB and LAB; the merged values themselves are tested in SANSStitch
+        self.assertTrue(merged_workspace.blocksize() == 10)
+
+    def test_that_can_merge_fitting(self):
+        # Arrange
+        fit_type = FitModeForMerge.Both
+        scale_input = 1.67
+        shift_input = 2.7
+        state = self._get_simple_state(fit_type, scale_input, shift_input)
+        merge_factory = MergeFactory()
+        merger = merge_factory.create_merger(state)
+
+        sample_lab, sample_hab, can_lab, can_hab = self._provide_data(state)
+        bundles = {ISISReductionMode.LAB: [sample_lab, can_lab],
+                   ISISReductionMode.HAB: [sample_hab, can_hab]}
+
+        # Act
+        result = merger.merge(bundles)
+        merged_workspace = result.merged_workspace
+
+        self.assertTrue(merged_workspace.blocksize() == 10)
+
+        scale = result.scale
+        shift = result.shift
+        self.assertTrue(scale != scale_input)
+        self.assertTrue(shift != shift_input)
+        self.assertTrue(abs(scale - (-15.0)) < 1e-4)
+        self.assertTrue(abs(shift - 0.0472222222222) < 1e-4)
+
+    def test_that_can_merge_with_shift_only_fitting(self):
+        # Arrange
+        fit_type = FitModeForMerge.ShiftOnly
+        scale_input = 1.67
+        shift_input = 2.7
+        state = self._get_simple_state(fit_type, scale_input, shift_input)
+        merge_factory = MergeFactory()
+        merger = merge_factory.create_merger(state)
+
+        sample_lab, sample_hab, can_lab, can_hab = self._provide_data(state)
+        bundles = {ISISReductionMode.LAB: [sample_lab, can_lab],
+                   ISISReductionMode.HAB: [sample_hab, can_hab]}
+
+        # Act
+        result = merger.merge(bundles)
+        merged_workspace = result.merged_workspace
+
+        self.assertTrue(merged_workspace.blocksize() == 10)
+
+        scale = result.scale
+        shift = result.shift
+
+        self.assertTrue(shift != shift_input)
+        self.assertTrue(abs(scale - scale_input) < 1e-4)
+        self.assertTrue(abs(shift - 0.823602794411) < 1e-4)
+
+    def test_that_can_merge_with_scale_only_fitting(self):
+        # Arrange
+        fit_type = FitModeForMerge.ScaleOnly
+        scale_input = 1.67
+        shift_input = 2.7
+        state = self._get_simple_state(fit_type, scale_input, shift_input)
+        merge_factory = MergeFactory()
+        merger = merge_factory.create_merger(state)
+
+        sample_lab, sample_hab, can_lab, can_hab = self._provide_data(state)
+        bundles = {ISISReductionMode.LAB: [sample_lab, can_lab],
+                   ISISReductionMode.HAB: [sample_hab, can_hab]}
+
+        # Act
+        result = merger.merge(bundles)
+        merged_workspace = result.merged_workspace
+
+        self.assertTrue(merged_workspace.blocksize() == 10)
+
+        scale = result.scale
+        shift = result.shift
+
+        self.assertTrue(scale != scale_input)
+        self.assertTrue(abs(scale - 1.0) < 1e-4)
+        self.assertTrue(abs(shift - shift_input) < 1e-4)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/scripts/test/SANS/algorithm_detail/strip_end_nans_test.py b/scripts/test/SANS/algorithm_detail/strip_end_nans_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..221797117d64bd426934cf7aa315cf69353b7e5c
--- /dev/null
+++ b/scripts/test/SANS/algorithm_detail/strip_end_nans_test.py
@@ -0,0 +1,44 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+from mantid.api import AlgorithmManager
+from sans.algorithm_detail.strip_end_nans_and_infs import strip_end_nans
+
+
+class StripEndNansTest(unittest.TestCase):
+    def _do_test(self, data_x, data_y):
+        # Arrange
+        alg_ws = AlgorithmManager.createUnmanaged("CreateWorkspace")
+        alg_ws.setChild(True)
+        alg_ws.initialize()
+        alg_ws.setProperty("OutputWorkspace", "test")
+
+        alg_ws.setProperty("DataX", data_x)
+        alg_ws.setProperty("DataY", data_y)
+        alg_ws.execute()
+        workspace = alg_ws.getProperty("OutputWorkspace").value
+
+        # Act
+        cropped_workspace = strip_end_nans(workspace)
+        # Assert
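+        # The leading NaN/Inf entries (indices 0-1) and the trailing Inf/NaN/Inf entries (indices 7-9)
+        # should have been stripped, leaving only the five finite values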
+        data_y = cropped_workspace.dataY(0)
+        self.assertTrue(len(data_y) == 5)
+        self.assertTrue(data_y[0] == 36.)
+        self.assertTrue(data_y[1] == 44.)
+        self.assertTrue(data_y[2] == 52.)
+        self.assertTrue(data_y[3] == 63.)
+        self.assertTrue(data_y[4] == 75.)
+
+    def test_that_can_strip_end_nans_and_infs_for_point_workspace(self):
+        data_x = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
+        data_y = [float("Nan"), float("Inf"), 36., 44., 52., 63., 75., float("Inf"), float("Nan"), float("Inf")]
+        self._do_test(data_x, data_y)
+
+    def test_that_can_strip_end_nans_and_infs_for_histo_workspace(self):
+        data_x = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]
+        data_y = [float("Nan"), float("Inf"), 36., 44., 52., 63., 75., float("Inf"), float("Nan"), float("Inf")]
+        self._do_test(data_x, data_y)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/scripts/test/SANS/command_interface/CMakeLists.txt b/scripts/test/SANS/command_interface/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3826c64dc13722c4962608f374676c747c0b71f7
--- /dev/null
+++ b/scripts/test/SANS/command_interface/CMakeLists.txt
@@ -0,0 +1,13 @@
+##
+## Tests for SANS
+##
+
+set ( TEST_PY_FILES
+  batch_csv_file_parser_test.py
+  command_interface_state_director_test.py
+)
+
+check_tests_valid ( ${CMAKE_CURRENT_SOURCE_DIR} ${TEST_PY_FILES} )
+
+# Prefix for test name=PythonAlgorithms
+pyunittest_add_test ( ${CMAKE_CURRENT_SOURCE_DIR} PythonAlgorithmsSANS ${TEST_PY_FILES} )
diff --git a/scripts/test/SANS/command_interface/batch_csv_file_parser_test.py b/scripts/test/SANS/command_interface/batch_csv_file_parser_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..98996ca7ef0d9d5ad2dd2a921cb744729a598207
--- /dev/null
+++ b/scripts/test/SANS/command_interface/batch_csv_file_parser_test.py
@@ -0,0 +1,144 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import os
+import mantid
+from sans.common.enums import BatchReductionEntry
+from sans.common.constants import ALL_PERIODS
+from sans.command_interface.batch_csv_file_parser import BatchCsvParser
+
+
+class BatchCsvParserTest(unittest.TestCase):
+
+    @staticmethod
+    def _save_to_csv(content):
+        test_file_path = os.path.join(mantid.config.getString('defaultsave.directory'), 'sans_batch_test_file.csv')
+        BatchCsvParserTest._remove_csv(test_file_path)
+
+        with open(test_file_path, 'w') as f:
+            f.write(content)
+        return test_file_path
+
+    @staticmethod
+    def _remove_csv(test_file_path):
+        if os.path.exists(test_file_path):
+            os.remove(test_file_path)
+
+    def test_that_raises_when_unknown_keyword_is_used(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,74044,output_as,test,new_key_word,test\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_raises_if_the_batch_file_contains_an_uneven_number_of_entries(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,74044,sample_trans,74024,sample_direct_beam,74014,can_sans,74019,can_trans,74020," \
+                   "can_direct_beam,output_as, first_eim\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_raises_when_sample_scatter_is_missing(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,,output_as,test_file\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_raises_when_output_is_missing(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,test,output_as,\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_raises_when_sample_transmission_is_specified_incompletely(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,test,output_as,test, sample_trans,test, sample_direct_beam,\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_raises_when_can_transmission_is_specified_incompletely(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,test,output_as,test, can_trans,, can_direct_beam, test\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_raises_when_can_transmission_is_specified_but_no_can_scatter(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,test,output_as,test, can_trans,, can_direct_beam, test\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_parses_two_lines_correctly(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,1,sample_trans,2,sample_direct_beam,3,output_as,test_file,user_file,user_test_file\n" \
+                   "sample_sans,1,can_sans,2,output_as,test_file2\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+
+        # Act
+        output = parser.parse_batch_file()
+
+        # Assert
+        self.assertTrue(len(output) == 2)
+
+        first_line = output[0]
+        # Should have 5 user specified entries and 3 period entries
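+        # Entries without an explicit period in the csv are expected to default to ALL_PERIODS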
+        self.assertTrue(len(first_line) == 8)
+        self.assertTrue(first_line[BatchReductionEntry.SampleScatter] == "1")
+        self.assertTrue(first_line[BatchReductionEntry.SampleScatterPeriod] == ALL_PERIODS)
+        self.assertTrue(first_line[BatchReductionEntry.SampleTransmission] == "2")
+        self.assertTrue(first_line[BatchReductionEntry.SampleTransmissionPeriod] == ALL_PERIODS)
+        self.assertTrue(first_line[BatchReductionEntry.SampleDirect] == "3")
+        self.assertTrue(first_line[BatchReductionEntry.SampleDirectPeriod] == ALL_PERIODS)
+        self.assertTrue(first_line[BatchReductionEntry.Output] == "test_file")
+        self.assertTrue(first_line[BatchReductionEntry.UserFile] == "user_test_file")
+        second_line = output[1]
+
+        # Should have 3 user specified entries and 2 period entries
+        self.assertTrue(len(second_line) == 5)
+        self.assertTrue(second_line[BatchReductionEntry.SampleScatter] == "1")
+        self.assertTrue(second_line[BatchReductionEntry.SampleScatterPeriod] == ALL_PERIODS)
+        self.assertTrue(second_line[BatchReductionEntry.CanScatter] == "2")
+        self.assertTrue(second_line[BatchReductionEntry.CanScatterPeriod] == ALL_PERIODS)
+        self.assertTrue(second_line[BatchReductionEntry.Output] == "test_file2")
+
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_parses_period_selection(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,1p7,can_sans,2P3,output_as,test_file2\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+
+        # Act
+        output = parser.parse_batch_file()
+
+        # Assert
+        self.assertTrue(len(output) == 1)
+
+        first_line = output[0]
+        # Should have 3 user specified entries and 2 period entries
+        self.assertTrue(len(first_line) == 5)
+        self.assertTrue(first_line[BatchReductionEntry.SampleScatter] == "1")
+        self.assertTrue(first_line[BatchReductionEntry.SampleScatterPeriod] == 7)
+        self.assertTrue(first_line[BatchReductionEntry.CanScatter] == "2")
+        self.assertTrue(first_line[BatchReductionEntry.CanScatterPeriod] == 3)
+        self.assertTrue(first_line[BatchReductionEntry.Output] == "test_file2")
+
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/scripts/test/SANS/command_interface/command_interface_state_director_test.py b/scripts/test/SANS/command_interface/command_interface_state_director_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffcf56e17b9d1fc9dee733a5fbc8ba8bc1d1afd3
--- /dev/null
+++ b/scripts/test/SANS/command_interface/command_interface_state_director_test.py
@@ -0,0 +1,201 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+from sans.command_interface.command_interface_state_director import (NParameterCommand, NParameterCommandId,
+                                                                     CommandInterfaceStateDirector, DataCommand,
+                                                                     DataCommandId, FitData)
+from sans.common.enums import (SANSFacility, RebinType, DetectorType, ReductionDimensionality,
+                               FitType, RangeStepType, ISISReductionMode, FitModeForMerge, DataType)
+
+
+class CommandInterfaceStateDirectorTest(unittest.TestCase):
+    def _assert_raises_nothing(self, func, parameter):
+        try:
+            func(parameter)
+        except:  # noqa
+            self.fail()
+
+    def test_can_set_commands_without_exceptions(self):
+        command_interface = CommandInterfaceStateDirector(SANSFacility.ISIS)
+
+        # User file
+        command = NParameterCommand(command_id=NParameterCommandId.user_file,
+                                    values=["test_user_file_sans2d.txt"])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Mask
+        command = NParameterCommand(command_id=NParameterCommandId.mask,
+                                    values=["MASK/ FRONT H197>H199"])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Monitor spectrum (incident monitor for monitor normalization)
+        command = NParameterCommand(command_id=NParameterCommandId.incident_spectrum,
+                                    values=[1, True, False])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Transmission spectrum (incident monitor for transmission calculation)
+        command = NParameterCommand(command_id=NParameterCommandId.incident_spectrum, values=[7, False, True])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Reduction Dimensionality One Dim
+        command = NParameterCommand(command_id=NParameterCommandId.reduction_dimensionality,
+                                    values=[ReductionDimensionality.OneDim])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Reduction Dimensionality Two Dim
+        command = NParameterCommand(command_id=NParameterCommandId.reduction_dimensionality,
+                                    values=[ReductionDimensionality.TwoDim])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Sample offset
+        command = NParameterCommand(command_id=NParameterCommandId.sample_offset, values=[23.6])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Sample scatter data
+        command = DataCommand(command_id=DataCommandId.sample_scatter, file_name="SANS2D00022024", period=3)
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Detector
+        command = NParameterCommand(command_id=NParameterCommandId.detector, values=[ISISReductionMode.HAB])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Gravity
+        command = NParameterCommand(command_id=NParameterCommandId.gravity, values=[True, 12.4])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Set centre
+        command = NParameterCommand(command_id=NParameterCommandId.centre, values=[12.4, 23.54, DetectorType.HAB])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Trans fit
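+        # The values are [fit data, wavelength low, wavelength high, fit type, polynomial order],
+        # which is checked against the transmission fit assertions further down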
+        command = NParameterCommand(command_id=NParameterCommandId.trans_fit, values=[FitData.Can, 10.4, 12.54,
+                                                                                      FitType.Log, 0])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Front detector rescale
+        command = NParameterCommand(command_id=NParameterCommandId.front_detector_rescale, values=[1.2, 2.4, True,
+                                                                                                   False, None, 7.2])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Event slices
+        command = NParameterCommand(command_id=NParameterCommandId.event_slices, values="1-23,55:3:65")
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Flood file
+        command = NParameterCommand(command_id=NParameterCommandId.flood_file, values=["test", DetectorType.LAB])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Phi limits
+        command = NParameterCommand(command_id=NParameterCommandId.phi_limit, values=[12.5, 123.6, False])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Wavelength correction file
+        command = NParameterCommand(command_id=NParameterCommandId.wavelength_correction_file,
+                                    values=["test", DetectorType.HAB])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Radius mask
+        command = NParameterCommand(command_id=NParameterCommandId.mask_radius,
+                                    values=[23.5, 234.7])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Wavelength limits
+        command = NParameterCommand(command_id=NParameterCommandId.wavelength_limit,
+                                    values=[1.23, 23., 1.1, RangeStepType.Lin])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # QXY Limits
+        command = NParameterCommand(command_id=NParameterCommandId.qxy_limit,
+                                    values=[1.23, 23., 1.1, RangeStepType.Lin])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Process all commands
+        state = command_interface.process_commands()
+
+        # Assert
+        # We check here that the elements we set up above (except for those from the user file) are being applied
+        self.assertTrue(state is not None)
+        self.assertTrue(state.mask.detectors[DetectorType.to_string(DetectorType.HAB)].range_horizontal_strip_start[-1]
+                        == 197)
+        self.assertTrue(state.mask.detectors[DetectorType.to_string(DetectorType.HAB)].range_horizontal_strip_stop[-1]
+                        == 199)
+        self.assertTrue(state.adjustment.normalize_to_monitor.incident_monitor == 1)
+        self.assertTrue(state.adjustment.normalize_to_monitor.rebin_type is RebinType.InterpolatingRebin)
+        self.assertTrue(state.adjustment.calculate_transmission.incident_monitor == 7)
+        self.assertTrue(state.adjustment.calculate_transmission.rebin_type is RebinType.Rebin)
+        self.assertTrue(state.reduction.reduction_dimensionality is ReductionDimensionality.TwoDim)
+        self.assertTrue(state.convert_to_q.reduction_dimensionality is ReductionDimensionality.TwoDim)
+        self.assertTrue(state.move.sample_offset == 23.6/1000.)
+        self.assertTrue(state.data.sample_scatter == "SANS2D00022024")
+        self.assertTrue(state.data.sample_scatter_period == 3)
+        self.assertTrue(state.reduction.reduction_mode is ISISReductionMode.HAB)
+        self.assertTrue(state.convert_to_q.use_gravity)
+        self.assertTrue(state.convert_to_q.gravity_extra_length == 12.4)
+        self.assertTrue(state.move.detectors[DetectorType.to_string(DetectorType.HAB)].sample_centre_pos1 == 12.4/1000.)
+        self.assertTrue(state.move.detectors[DetectorType.to_string(DetectorType.HAB)].sample_centre_pos2
+                        == 23.54/1000.)
+        self.assertTrue(state.adjustment.calculate_transmission.fit[DataType.to_string(DataType.Can)].fit_type
+                        is FitType.Log)
+        self.assertTrue(state.adjustment.calculate_transmission.fit[DataType.to_string(DataType.Can)].polynomial_order
+                        == 0)
+
+        self.assertTrue(state.adjustment.calculate_transmission.fit[DataType.to_string(DataType.Can)].wavelength_low
+                        == 10.4)
+        self.assertTrue(state.adjustment.calculate_transmission.fit[DataType.to_string(DataType.Can)].wavelength_high
+                        == 12.54)
+
+        self.assertTrue(state.reduction.merge_scale == 1.2)
+        self.assertTrue(state.reduction.merge_shift == 2.4)
+        self.assertTrue(state.reduction.merge_fit_mode is FitModeForMerge.ScaleOnly)
+        self.assertTrue(state.reduction.merge_range_min is None)
+        self.assertTrue(state.reduction.merge_range_max == 7.2)
+
+        # Event slices
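+        # "1-23,55:3:65" expands to one slice from 1 to 23 plus slices of width 3 between 55 and 65:
+        # 55-58, 58-61, 61-64 and 64-65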
+        start_values = state.slice.start_time
+        end_values = state.slice.end_time
+        expected_start_values = [1., 55., 58., 61., 64.]
+        expected_end_values = [23., 58., 61., 64., 65.]
+        for s1, e1, s2, e2 in zip(start_values, end_values, expected_start_values, expected_end_values):
+            self.assertTrue(s1 == s2)
+            self.assertTrue(e1 == e2)
+
+        self.assertTrue(state.adjustment.wavelength_and_pixel_adjustment.adjustment_files[
+                            DetectorType.to_string(DetectorType.LAB)].pixel_adjustment_file == "test")
+        self.assertTrue(state.mask.phi_min == 12.5)
+        self.assertTrue(state.mask.phi_max == 123.6)
+        self.assertFalse(state.mask.use_mask_phi_mirror)
+        self.assertTrue(state.adjustment.wavelength_and_pixel_adjustment.adjustment_files[
+                            DetectorType.to_string(DetectorType.HAB)].wavelength_adjustment_file == "test")
+        self.assertTrue(state.mask.radius_min == 23.5 / 1000.)
+        self.assertTrue(state.mask.radius_max == 234.7 / 1000.)
+        self.assertTrue(state.wavelength.wavelength_low == 1.23)
+        self.assertTrue(state.adjustment.normalize_to_monitor.wavelength_high == 23.)
+        self.assertTrue(state.adjustment.wavelength_and_pixel_adjustment.wavelength_step == 1.1)
+        self.assertTrue(state.adjustment.calculate_transmission.wavelength_step_type is RangeStepType.Lin)
+        self.assertTrue(state.convert_to_q.q_xy_max == 23.)
+        self.assertTrue(state.convert_to_q.q_xy_step == 1.1)
+        self.assertTrue(state.convert_to_q.q_xy_step_type is RangeStepType.Lin)
+
+    def test_that_can_remove_last_command(self):
+        # Arrange
+        command_interface = CommandInterfaceStateDirector(SANSFacility.ISIS)
+        command_interface.add_command(NParameterCommand(command_id=NParameterCommandId.user_file,
+                                                        values=["file_1.txt"]))
+        command_interface.add_command(NParameterCommand(command_id=NParameterCommandId.user_file,
+                                                        values=["file_2.txt"]))
+        command_interface.add_command(NParameterCommand(command_id=NParameterCommandId.user_file,
+                                                        values=["file_3.txt"]))
+        # Act
+        commands = command_interface.get_commands()
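+        # get_commands appears to return a reference to the director's internal command list,
+        # so the length check after removal reflects the change directly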
+        self.assertTrue(len(commands) == 3)
+
+        command_interface.remove_last_user_file()
+
+        # Assert
+        self.assertTrue(len(commands) == 2)
+        last_command = commands[-1]
+        self.assertTrue(last_command.values == ["file_2.txt"])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/scripts/test/SANSUtilityTest.py b/scripts/test/SANSUtilityTest.py
index 41c4f82cfd8e317b35902390e69285919933a434..796bc47d9d18d2c439d9737e7a12f0c2e609bd39 100644
--- a/scripts/test/SANSUtilityTest.py
+++ b/scripts/test/SANSUtilityTest.py
@@ -1598,10 +1598,13 @@ class TestSelectNewDetector(unittest.TestCase):
     def test_that_for_SANS2D_correct_settings_are_selected(self):
         self.assertTrue(su.get_correct_combinDet_setting("SANS2d", "rear") == "rear")
         self.assertTrue(su.get_correct_combinDet_setting("SANS2D", "FRONT") == "front")
+        self.assertTrue(su.get_correct_combinDet_setting("SANS2d", "rear-detector") == "rear")
+        self.assertTrue(su.get_correct_combinDet_setting("SANS2D", "FRONT-DETECTOR") == "front")
         self.assertTrue(su.get_correct_combinDet_setting("sAnS2d", "boTH") == "both")
         self.assertTrue(su.get_correct_combinDet_setting("sans2d", "merged") == "merged")
 
     def test_that_for_LOQ_correct_settings_are_selected(self):
+        self.assertTrue(su.get_correct_combinDet_setting("Loq", "main-detector-bank") == "rear")
         self.assertTrue(su.get_correct_combinDet_setting("Loq", "main") == "rear")
         self.assertTrue(su.get_correct_combinDet_setting("LOQ", "Hab") == "front")
         self.assertTrue(su.get_correct_combinDet_setting("lOQ", "boTH") == "both")